2024-11-24 08:28:14,742 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 08:28:14,759 main DEBUG Took 0.014010 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-24 08:28:14,759 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-24 08:28:14,760 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-24 08:28:14,761 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-24 08:28:14,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,772 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-24 08:28:14,788 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,790 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,791 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,792 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,793 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,793 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,794 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,795 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,795 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,796 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,796 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-24 08:28:14,797 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,797 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,798 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,798 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,799 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,799 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,800 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,800 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,801 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,802 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 08:28:14,802 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,803 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-24 08:28:14,804 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 08:28:14,806 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-24 08:28:14,808 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-24 08:28:14,809 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-24 08:28:14,810 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-24 08:28:14,810 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-24 08:28:14,822 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-24 08:28:14,826 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-24 08:28:14,828 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-24 08:28:14,828 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-24 08:28:14,829 main DEBUG createAppenders(={Console}) 2024-11-24 08:28:14,830 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-24 08:28:14,830 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 08:28:14,831 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-24 08:28:14,831 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-24 08:28:14,832 main DEBUG OutputStream closed 2024-11-24 08:28:14,832 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-24 08:28:14,832 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-24 08:28:14,833 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-24 08:28:14,908 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-24 08:28:14,910 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-24 08:28:14,911 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-24 08:28:14,912 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-24 08:28:14,913 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-24 08:28:14,913 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-24 08:28:14,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-24 08:28:14,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-24 08:28:14,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-24 08:28:14,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-24 08:28:14,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-24 08:28:14,916 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-24 08:28:14,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-24 08:28:14,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-24 08:28:14,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-24 08:28:14,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-24 08:28:14,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-24 08:28:14,919 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-24 08:28:14,922 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-24 08:28:14,922 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-24 08:28:14,923 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-24 08:28:14,924 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-24T08:28:15,191 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916 2024-11-24 08:28:15,194 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-24 08:28:15,195 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
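The LoggerConfig, PatternLayout and HBaseTestAppender builder calls above come from the log4j2.properties inside hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar (see the "Reconfiguration complete" line). A minimal sketch of an equivalent configuration, reconstructed only from the levels, pattern and appender settings visible in these DEBUG lines; the property keys and the appender type name are assumptions, not copied from the real file:

  # illustrative reconstruction, not the shipped file
  # appender type name assumed from the HBaseTestAppender builder above
  appender.console.type = HBaseTestAppender
  appender.console.name = Console
  appender.console.target = SYSTEM_ERR
  appender.console.maxSize = 1G
  appender.console.layout.type = PatternLayout
  appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n
  # matches the root logger built with levelAndRefs="INFO,Console"
  rootLogger = INFO,Console
  logger.hbase.name = org.apache.hadoop.hbase
  logger.hbase.level = DEBUG
  logger.hadoop.name = org.apache.hadoop
  logger.hadoop.level = WARN
  logger.zk.name = org.apache.zookeeper
  logger.zk.level = ERROR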
2024-11-24T08:28:15,204 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-24T08:28:15,242 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=185, ProcessCount=11, AvailableMemoryMB=7077 2024-11-24T08:28:15,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:28:15,265 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20, deleteOnExit=true 2024-11-24T08:28:15,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:28:15,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/test.cache.data in system properties and HBase conf 2024-11-24T08:28:15,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:28:15,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:28:15,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:28:15,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:28:15,270 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:28:15,368 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-24T08:28:15,476 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T08:28:15,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:28:15,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:28:15,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:28:15,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:28:15,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:28:15,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:28:15,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:28:15,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:28:15,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:28:15,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:28:15,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:28:15,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:28:15,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:28:15,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:28:16,011 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:28:16,387 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-24T08:28:16,476 INFO [Time-limited test {}] log.Log(170): Logging initialized @2597ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-24T08:28:16,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:28:16,629 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:28:16,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:28:16,653 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:28:16,654 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:28:16,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:28:16,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:28:16,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:28:16,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/java.io.tmpdir/jetty-localhost-44701-hadoop-hdfs-3_4_1-tests_jar-_-any-16319670450495496391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:28:16,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:44701} 2024-11-24T08:28:16,895 INFO [Time-limited test {}] server.Server(415): Started @3018ms 2024-11-24T08:28:16,920 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:28:17,302 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:28:17,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:28:17,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:28:17,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:28:17,312 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:28:17,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:28:17,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:28:17,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/java.io.tmpdir/jetty-localhost-37211-hadoop-hdfs-3_4_1-tests_jar-_-any-12727969585993350906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:28:17,435 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:37211} 2024-11-24T08:28:17,435 INFO [Time-limited test {}] server.Server(415): Started @3558ms 2024-11-24T08:28:17,493 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:28:17,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:28:17,654 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:28:17,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:28:17,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:28:17,656 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:28:17,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:28:17,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:28:17,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/java.io.tmpdir/jetty-localhost-39773-hadoop-hdfs-3_4_1-tests_jar-_-any-15444238762853666951/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:28:17,781 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:39773} 2024-11-24T08:28:17,781 INFO [Time-limited test {}] server.Server(415): Started @3904ms 2024-11-24T08:28:17,784 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
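The StartMiniClusterOption dumped at 08:28:15,246 (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) is what drives the DFS, ZooKeeper and HBase startup recorded above. A hedged Java sketch of how a test typically requests that topology from HBaseTestingUtil; this is not TestLogRolling's actual setup code, just the standard builder API filled in with the values from the log:

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.apache.hadoop.hbase.StartMiniClusterOption;

  // Typically lives in a JUnit @BeforeClass/@AfterClass pair; 'throws Exception' omitted here.
  HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(1)        // numMasters=1 in the logged option
      .numRegionServers(1)  // numRegionServers=1
      .numDataNodes(2)      // numDataNodes=2
      .numZkServers(1)      // numZkServers=1
      .build();
  TEST_UTIL.startMiniCluster(option);   // starts mini DFS, mini ZK, master and region server
  // ... test body ...
  TEST_UTIL.shutdownMiniCluster();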
2024-11-24T08:28:17,985 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data1/current/BP-447478019-172.17.0.2-1732436896116/current, will proceed with Du for space computation calculation, 2024-11-24T08:28:17,985 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data3/current/BP-447478019-172.17.0.2-1732436896116/current, will proceed with Du for space computation calculation, 2024-11-24T08:28:17,985 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data2/current/BP-447478019-172.17.0.2-1732436896116/current, will proceed with Du for space computation calculation, 2024-11-24T08:28:17,985 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data4/current/BP-447478019-172.17.0.2-1732436896116/current, will proceed with Du for space computation calculation, 2024-11-24T08:28:18,042 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:28:18,044 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:28:18,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbec70007194b342 with lease ID 0xefeae7219bcdd9ed: Processing first storage report for DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c from datanode DatanodeRegistration(127.0.0.1:45079, datanodeUuid=ffb2e774-6d02-4471-ac2d-61106e0b6ff7, infoPort=41647, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116) 2024-11-24T08:28:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbec70007194b342 with lease ID 0xefeae7219bcdd9ed: from storage DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c node DatanodeRegistration(127.0.0.1:45079, datanodeUuid=ffb2e774-6d02-4471-ac2d-61106e0b6ff7, infoPort=41647, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:28:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa49d1be964a8e27f with lease ID 0xefeae7219bcdd9ec: Processing first storage report for DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3 from datanode DatanodeRegistration(127.0.0.1:45027, datanodeUuid=c7ce254c-1eb9-492a-92f8-bb60a797fa6f, infoPort=35725, infoSecurePort=0, ipcPort=46581, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116) 2024-11-24T08:28:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa49d1be964a8e27f with lease ID 0xefeae7219bcdd9ec: from storage DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3 node DatanodeRegistration(127.0.0.1:45027, datanodeUuid=c7ce254c-1eb9-492a-92f8-bb60a797fa6f, infoPort=35725, infoSecurePort=0, ipcPort=46581, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:28:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbec70007194b342 with lease ID 0xefeae7219bcdd9ed: Processing first storage report for DS-80d7619b-2d13-46c9-a477-9ac5d62642d5 from datanode DatanodeRegistration(127.0.0.1:45079, datanodeUuid=ffb2e774-6d02-4471-ac2d-61106e0b6ff7, infoPort=41647, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116) 2024-11-24T08:28:18,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbec70007194b342 with lease ID 0xefeae7219bcdd9ed: from storage DS-80d7619b-2d13-46c9-a477-9ac5d62642d5 node DatanodeRegistration(127.0.0.1:45079, datanodeUuid=ffb2e774-6d02-4471-ac2d-61106e0b6ff7, infoPort=41647, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:28:18,118 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa49d1be964a8e27f with lease ID 0xefeae7219bcdd9ec: Processing first storage report for DS-9fd78f13-a204-43da-8d7e-b2637707712b from datanode DatanodeRegistration(127.0.0.1:45027, datanodeUuid=c7ce254c-1eb9-492a-92f8-bb60a797fa6f, infoPort=35725, infoSecurePort=0, ipcPort=46581, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116) 2024-11-24T08:28:18,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa49d1be964a8e27f with lease ID 0xefeae7219bcdd9ec: from storage DS-9fd78f13-a204-43da-8d7e-b2637707712b node DatanodeRegistration(127.0.0.1:45027, datanodeUuid=c7ce254c-1eb9-492a-92f8-bb60a797fa6f, infoPort=35725, infoSecurePort=0, ipcPort=46581, storageInfo=lv=-57;cid=testClusterID;nsid=1914024545;c=1732436896116), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:28:18,170 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916 2024-11-24T08:28:18,246 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/zookeeper_0, clientPort=52809, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:28:18,257 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52809 2024-11-24T08:28:18,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:18,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:28:18,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:28:18,924 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747 with version=8 2024-11-24T08:28:18,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:28:19,037 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-24T08:28:19,296 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:28:19,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:28:19,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:28:19,314 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:28:19,314 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:28:19,314 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:28:19,464 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:28:19,526 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-24T08:28:19,534 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-24T08:28:19,538 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:28:19,566 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19630 (auto-detected) 2024-11-24T08:28:19,567 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-24T08:28:19,586 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38311 2024-11-24T08:28:19,606 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38311 connecting to ZooKeeper ensemble=127.0.0.1:52809 2024-11-24T08:28:19,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383110x0, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:28:19,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38311-0x10149171c430000 connected 2024-11-24T08:28:19,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:19,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:19,684 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:28:19,688 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747, hbase.cluster.distributed=false 2024-11-24T08:28:19,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:28:19,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38311 2024-11-24T08:28:19,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38311 2024-11-24T08:28:19,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38311 2024-11-24T08:28:19,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38311 2024-11-24T08:28:19,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38311 2024-11-24T08:28:19,840 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:28:19,842 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:28:19,842 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:28:19,843 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:28:19,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:28:19,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:28:19,847 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:28:19,850 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:28:19,851 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40485 2024-11-24T08:28:19,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40485 connecting to ZooKeeper ensemble=127.0.0.1:52809 2024-11-24T08:28:19,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:19,858 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:19,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404850x0, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:28:19,867 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40485-0x10149171c430001 connected 2024-11-24T08:28:19,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:28:19,871 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:28:19,879 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:28:19,881 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:28:19,886 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:28:19,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40485 2024-11-24T08:28:19,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40485 2024-11-24T08:28:19,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40485 2024-11-24T08:28:19,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40485 2024-11-24T08:28:19,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40485 2024-11-24T08:28:19,905 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:38311 2024-11-24T08:28:19,906 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,38311,1732436899093 2024-11-24T08:28:19,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:28:19,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:28:19,917 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,38311,1732436899093 2024-11-24T08:28:19,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:28:19,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:19,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-24T08:28:19,940 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:28:19,941 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,38311,1732436899093 from backup master directory 2024-11-24T08:28:19,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,38311,1732436899093 2024-11-24T08:28:19,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:28:19,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:28:19,944 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:28:19,945 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,38311,1732436899093 2024-11-24T08:28:19,947 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-24T08:28:19,948 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-24T08:28:20,005 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase.id] with ID: 3a7bd28c-19fd-4d64-99b9-067a53d0584b 2024-11-24T08:28:20,006 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/.tmp/hbase.id 2024-11-24T08:28:20,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:28:20,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:28:20,020 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/.tmp/hbase.id]:[hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase.id] 2024-11-24T08:28:20,062 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:20,067 INFO 
[master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:28:20,087 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-24T08:28:20,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:28:20,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:28:20,124 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:28:20,126 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:28:20,132 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:28:20,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:28:20,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:28:20,192 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store 2024-11-24T08:28:20,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:28:20,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:28:20,218 INFO [master/30c28c82771d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-24T08:28:20,221 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:28:20,222 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:28:20,222 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:28:20,222 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:28:20,224 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:28:20,224 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
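The 'master:store' descriptor printed above (column families info, proc, rs and state) can also be read as plain descriptor-builder calls. A hedged sketch for the 'info' family only, mirroring the attribute values shown in the log; it uses the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API for illustration, not the internal code that actually builds the master local region:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  // Illustrative only: mirrors the attributes logged for the 'info' family of master:store.
  TableDescriptor store = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(3)                                     // VERSIONS => '3'
          .setInMemory(true)                                     // IN_MEMORY => 'true'
          .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
          .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
          .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
          .build())
      .build();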
2024-11-24T08:28:20,224 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:28:20,225 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732436900222Disabling compacts and flushes for region at 1732436900222Disabling writes for close at 1732436900224 (+2 ms)Writing region close event to WAL at 1732436900224Closed at 1732436900224 2024-11-24T08:28:20,227 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/.initializing 2024-11-24T08:28:20,227 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/WALs/30c28c82771d,38311,1732436899093 2024-11-24T08:28:20,250 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C38311%2C1732436899093, suffix=, logDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/WALs/30c28c82771d,38311,1732436899093, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/oldWALs, maxLogs=10 2024-11-24T08:28:20,261 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C38311%2C1732436899093.1732436900256 2024-11-24T08:28:20,279 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/WALs/30c28c82771d,38311,1732436899093/30c28c82771d%2C38311%2C1732436899093.1732436900256 2024-11-24T08:28:20,287 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:35725:35725)] 2024-11-24T08:28:20,289 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:28:20,289 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:28:20,292 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,293 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:28:20,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:20,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:28:20,365 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:28:20,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,369 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:28:20,369 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:28:20,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:28:20,373 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:28:20,374 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,378 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,380 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,385 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,386 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,389 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:28:20,392 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:28:20,396 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:28:20,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790850, jitterRate=0.005618557333946228}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:28:20,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732436900305Initializing all the Stores at 1732436900307 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436900307Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436900308 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436900308Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436900308Cleaning up temporary data from old regions at 1732436900386 (+78 ms)Region opened successfully at 1732436900403 (+17 ms) 2024-11-24T08:28:20,404 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:28:20,438 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f04549f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:28:20,471 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:28:20,482 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:28:20,482 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:28:20,486 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:28:20,487 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-24T08:28:20,492 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-24T08:28:20,492 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:28:20,518 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:28:20,529 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:28:20,532 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:28:20,535 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:28:20,536 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:28:20,538 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:28:20,540 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:28:20,543 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:28:20,545 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:28:20,546 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, 
baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:28:20,547 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:28:20,564 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:28:20,566 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:28:20,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:28:20,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:28:20,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,573 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,38311,1732436899093, sessionid=0x10149171c430000, setting cluster-up flag (Was=false) 2024-11-24T08:28:20,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,592 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:28:20,594 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,38311,1732436899093 2024-11-24T08:28:20,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:20,611 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, 
/hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:28:20,612 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,38311,1732436899093 2024-11-24T08:28:20,619 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:28:20,691 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:28:20,693 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(746): ClusterId : 3a7bd28c-19fd-4d64-99b9-067a53d0584b 2024-11-24T08:28:20,696 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:28:20,701 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:28:20,701 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:28:20,702 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:28:20,704 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:28:20,705 DEBUG [RS:0;30c28c82771d:40485 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dd04b6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:28:20,709 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
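
The StochasticLoadBalancer line above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000, all of which come out of the site configuration. A minimal sketch of tuning them programmatically follows; the property keys are assumptions recalled from the HBase reference guide, not taken from this log, so verify them against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property keys for the values reported in the balancer log line above.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1000000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }
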
2024-11-24T08:28:20,720 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:40485 2024-11-24T08:28:20,717 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,38311,1732436899093 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:28:20,723 INFO [RS:0;30c28c82771d:40485 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:28:20,723 INFO [RS:0;30c28c82771d:40485 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:28:20,723 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T08:28:20,726 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:28:20,726 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,38311,1732436899093 with port=40485, startcode=1732436899799 2024-11-24T08:28:20,726 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:28:20,726 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:28:20,727 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:28:20,727 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:28:20,727 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,727 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:28:20,728 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,731 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732436930731 2024-11-24T08:28:20,733 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:28:20,734 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:28:20,734 INFO 
[master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:28:20,734 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:28:20,738 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:28:20,739 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:28:20,739 DEBUG [RS:0;30c28c82771d:40485 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:28:20,739 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:28:20,740 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:28:20,742 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,740 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,742 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:28:20,744 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:28:20,745 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:28:20,745 INFO 
[master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:28:20,748 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:28:20,749 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:28:20,752 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732436900750,5,FailOnTimeoutGroup] 2024-11-24T08:28:20,753 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732436900753,5,FailOnTimeoutGroup] 2024-11-24T08:28:20,753 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,754 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:28:20,755 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,755 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
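
The HMaster line above states its own remedy: reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a threshold greater than 0. A minimal sketch of supplying that threshold follows (in practice it would normally go into hbase-site.xml; the value 256 is only an example).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Property name taken from the log message above; 256 is an arbitrary example threshold.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }
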
2024-11-24T08:28:20,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:28:20,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:28:20,761 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:28:20,762 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747 2024-11-24T08:28:20,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:28:20,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:28:20,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:28:20,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:28:20,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:28:20,782 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:20,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:28:20,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:28:20,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:20,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:28:20,792 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:28:20,792 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:20,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:28:20,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:28:20,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:20,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:20,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:28:20,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740 2024-11-24T08:28:20,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740 2024-11-24T08:28:20,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:28:20,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:28:20,805 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
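
The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the hbase:meta descriptor, so the policy falls back to the memstore flush size divided by the number of families (16.0 M here). A minimal sketch of attaching that key to the descriptor of an ordinary, hypothetical table follows; the 16777216-byte value simply mirrors the fallback reported in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Hypothetical table; hbase:meta itself is bootstrapped by the master, not created like this.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Key quoted from the log message; 16777216 bytes = 16 MB.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216")
            .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }
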
2024-11-24T08:28:20,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:28:20,812 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:28:20,813 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=834044, jitterRate=0.06054206192493439}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:28:20,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732436900776Initializing all the Stores at 1732436900778 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436900778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436900778Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436900778Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436900778Cleaning up temporary data from old regions at 1732436900804 (+26 ms)Region opened successfully at 1732436900815 (+11 ms) 2024-11-24T08:28:20,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:28:20,816 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:28:20,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:28:20,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:28:20,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:28:20,817 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:28:20,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732436900815Disabling compacts and flushes for region at 1732436900815Disabling writes for close at 1732436900816 (+1 ms)Writing 
region close event to WAL at 1732436900817 (+1 ms)Closed at 1732436900817 2024-11-24T08:28:20,819 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49355, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:28:20,821 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:28:20,821 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:28:20,825 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38311 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,40485,1732436899799 2024-11-24T08:28:20,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:28:20,828 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38311 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,40485,1732436899799 2024-11-24T08:28:20,836 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:28:20,838 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:28:20,844 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747 2024-11-24T08:28:20,844 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36899 2024-11-24T08:28:20,844 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:28:20,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:28:20,849 DEBUG [RS:0;30c28c82771d:40485 {}] zookeeper.ZKUtil(111): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,40485,1732436899799 2024-11-24T08:28:20,849 WARN [RS:0;30c28c82771d:40485 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
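
Both region opens above report a SteppingSplitPolicy wrapping ConstantSizeRegionSplitPolicy with a small, jittered desiredMaxFileSize, which reflects this test harness rather than a production table. For a user table the equivalent knobs live on the table descriptor; a minimal sketch follows, with the split-policy class and the 10 GB size chosen only as examples.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))   // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Pin an explicit policy instead of the SteppingSplitPolicy reported above.
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
            // Example region max file size (10 GB); the tiny jittered values in the log
            // come from the test setup, not from a production default.
            .setMaxFileSize(10L * 1024 * 1024 * 1024)
            .build();
        System.out.println(td.getRegionSplitPolicyClassName());
      }
    }
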
2024-11-24T08:28:20,849 INFO [RS:0;30c28c82771d:40485 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:28:20,850 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799 2024-11-24T08:28:20,851 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,40485,1732436899799] 2024-11-24T08:28:20,876 INFO [RS:0;30c28c82771d:40485 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:28:20,887 INFO [RS:0;30c28c82771d:40485 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:28:20,892 INFO [RS:0;30c28c82771d:40485 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:28:20,892 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,893 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:28:20,899 INFO [RS:0;30c28c82771d:40485 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:28:20,900 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,900 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,901 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,902 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:28:20,902 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,902 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,902 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:28:20,902 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:28:20,902 DEBUG [RS:0;30c28c82771d:40485 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:28:20,903 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,903 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,903 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,904 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,904 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,904 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40485,1732436899799-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:28:20,923 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:28:20,924 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40485,1732436899799-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,925 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:20,925 INFO [RS:0;30c28c82771d:40485 {}] regionserver.Replication(171): 30c28c82771d,40485,1732436899799 started 2024-11-24T08:28:20,943 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:28:20,944 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,40485,1732436899799, RpcServer on 30c28c82771d/172.17.0.2:40485, sessionid=0x10149171c430001 2024-11-24T08:28:20,944 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:28:20,945 DEBUG [RS:0;30c28c82771d:40485 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,40485,1732436899799 2024-11-24T08:28:20,945 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,40485,1732436899799' 2024-11-24T08:28:20,945 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:28:20,946 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:28:20,947 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:28:20,947 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:28:20,947 DEBUG [RS:0;30c28c82771d:40485 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,40485,1732436899799 2024-11-24T08:28:20,948 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,40485,1732436899799' 2024-11-24T08:28:20,948 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:28:20,948 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:28:20,949 DEBUG [RS:0;30c28c82771d:40485 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:28:20,949 INFO [RS:0;30c28c82771d:40485 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:28:20,949 INFO [RS:0;30c28c82771d:40485 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:28:20,989 WARN [30c28c82771d:38311 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
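
The two quota managers above log "Quota support disabled", which is the default state. A minimal sketch of the switch that enables them follows; hbase.quota.enabled is my understanding of the feature's main toggle rather than something shown in this log, so treat the key as an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key for enabling RPC/space quotas; it defaults to false, which matches
        // the "Quota support disabled" lines above.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }
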
2024-11-24T08:28:21,057 INFO [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C40485%2C1732436899799, suffix=, logDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs, maxLogs=32 2024-11-24T08:28:21,060 INFO [RS:0;30c28c82771d:40485 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436901060 2024-11-24T08:28:21,069 INFO [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436901060 2024-11-24T08:28:21,071 DEBUG [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:28:21,242 DEBUG [30c28c82771d:38311 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:28:21,253 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,40485,1732436899799 2024-11-24T08:28:21,260 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,40485,1732436899799, state=OPENING 2024-11-24T08:28:21,265 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:28:21,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:21,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:28:21,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:28:21,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:28:21,269 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:28:21,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,40485,1732436899799}] 2024-11-24T08:28:21,447 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:28:21,451 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39597, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:28:21,463 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:28:21,463 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:28:21,467 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C40485%2C1732436899799.meta, suffix=.meta, logDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs, maxLogs=32 2024-11-24T08:28:21,469 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.meta.1732436901469.meta 2024-11-24T08:28:21,476 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.meta.1732436901469.meta 2024-11-24T08:28:21,477 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:28:21,484 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:28:21,486 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:28:21,489 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:28:21,494 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
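Once the meta region (encoded name 1588230740) is open on the region server, clients resolve its location through the connection registry, which is what later "fetched meta region location" lines show. A minimal client-side sketch, assuming a Connection built against the same cluster configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Prints something like: region=hbase:meta,,1.1588230740, hostname=..., seqNum=...
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println(loc);
    }
  }
}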
2024-11-24T08:28:21,498 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:28:21,499 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:28:21,499 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:28:21,499 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:28:21,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:28:21,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:28:21,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:21,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:21,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:28:21,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:28:21,506 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:21,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:21,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:28:21,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:28:21,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:21,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:28:21,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:28:21,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:28:21,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:21,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
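The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000) correspond to the stock compaction selection knobs. A minimal sketch of the equivalent configuration properties; the property names are the standard HBase ones and are shown as an illustration, not copied from this test's setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact in the log
    conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // ratio used by ExploringCompactionPolicy
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    System.out.println(conf.get("hbase.hstore.compaction.min"));
  }
}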
2024-11-24T08:28:21,512 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:28:21,513 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740 2024-11-24T08:28:21,515 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740 2024-11-24T08:28:21,517 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:28:21,517 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:28:21,518 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:28:21,521 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:28:21,522 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827946, jitterRate=0.05278903245925903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:28:21,523 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:28:21,524 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732436901500Writing region info on filesystem at 1732436901500Initializing all the Stores at 1732436901502 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436901502Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436901502Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436901502Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436901502Cleaning up temporary data from old regions at 1732436901517 (+15 ms)Running coprocessor post-open hooks at 1732436901523 (+6 ms)Region opened successfully at 1732436901523 2024-11-24T08:28:21,530 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732436901437 2024-11-24T08:28:21,542 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:28:21,543 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:28:21,544 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,40485,1732436899799 2024-11-24T08:28:21,546 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,40485,1732436899799, state=OPEN 2024-11-24T08:28:21,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:28:21,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:28:21,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:28:21,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:28:21,552 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,40485,1732436899799 2024-11-24T08:28:21,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:28:21,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,40485,1732436899799 in 281 msec 2024-11-24T08:28:21,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:28:21,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 732 msec 2024-11-24T08:28:21,566 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:28:21,567 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:28:21,587 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:28:21,588 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,40485,1732436899799, seqNum=-1] 2024-11-24T08:28:21,612 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:28:21,614 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55599, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:28:21,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 988 msec 2024-11-24T08:28:21,636 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732436901636, completionTime=-1 2024-11-24T08:28:21,639 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:28:21,639 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:28:21,672 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:28:21,672 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732436961672 2024-11-24T08:28:21,672 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437021672 2024-11-24T08:28:21,672 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-11-24T08:28:21,675 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,38311,1732436899093-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:21,676 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,38311,1732436899093-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:21,676 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,38311,1732436899093-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:21,677 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:38311, period=300000, unit=MILLISECONDS is enabled. 
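InitMetaProcedure finishes by creating the 'default' and 'hbase' namespaces, as logged above. A minimal client-side check of that result, assuming an Admin obtained from the same connection:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Expect exactly the two namespaces created by InitMetaProcedure: 'default' and 'hbase'.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}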
2024-11-24T08:28:21,678 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:21,678 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:28:21,685 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:28:21,706 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.761sec 2024-11-24T08:28:21,707 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:28:21,708 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:28:21,709 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:28:21,710 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:28:21,710 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:28:21,711 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,38311,1732436899093-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:28:21,711 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,38311,1732436899093-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:28:21,721 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:28:21,722 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:28:21,722 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,38311,1732436899093-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:28:21,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4731d90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:28:21,807 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T08:28:21,807 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T08:28:21,811 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,38311,-1 for getting cluster id 2024-11-24T08:28:21,815 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:28:21,826 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3a7bd28c-19fd-4d64-99b9-067a53d0584b' 2024-11-24T08:28:21,829 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:28:21,829 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3a7bd28c-19fd-4d64-99b9-067a53d0584b" 2024-11-24T08:28:21,832 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e3fdc6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:28:21,832 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,38311,-1] 2024-11-24T08:28:21,835 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:28:21,837 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:28:21,839 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52222, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:28:21,842 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb3700d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:28:21,843 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:28:21,850 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,40485,1732436899799, seqNum=-1] 2024-11-24T08:28:21,850 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:28:21,853 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54300, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:28:21,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=30c28c82771d,38311,1732436899093 2024-11-24T08:28:21,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:28:21,882 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:28:21,886 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:28:21,891 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 30c28c82771d,38311,1732436899093 2024-11-24T08:28:21,895 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1329d828 2024-11-24T08:28:21,896 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:28:21,899 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52236, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:28:21,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:28:21,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
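The two TableDescriptorChecker warnings come from the test requesting a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that rolling, flushing, and splitting happen quickly, and balanceSwitch=false is set by the same client. A minimal sketch of client calls that would produce those warnings and the create request that follows; the exact way this test sets the values is an assumption:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.balancerSwitch(false, true); // "set balanceSwitch=false" in the log
      TableDescriptorBuilder td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")));
      admin.createTable(td.build());
    }
  }
}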
2024-11-24T08:28:21,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:28:21,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:28:21,919 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:28:21,921 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-24T08:28:21,921 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:21,923 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:28:21,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:28:21,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741835_1011 (size=389) 2024-11-24T08:28:21,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741835_1011 (size=389) 2024-11-24T08:28:21,992 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 88d8569a8c7ac25de3146945b69e3fbd, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747 2024-11-24T08:28:22,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741836_1012 (size=72) 2024-11-24T08:28:22,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741836_1012 (size=72) 2024-11-24T08:28:22,005 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:28:22,005 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 88d8569a8c7ac25de3146945b69e3fbd, disabling compactions & flushes 2024-11-24T08:28:22,005 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:28:22,005 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:28:22,005 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. after waiting 0 ms 2024-11-24T08:28:22,005 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:28:22,005 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:28:22,005 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 88d8569a8c7ac25de3146945b69e3fbd: Waiting for close lock at 1732436902005Disabling compacts and flushes for region at 1732436902005Disabling writes for close at 1732436902005Writing region close event to WAL at 1732436902005Closed at 1732436902005 2024-11-24T08:28:22,007 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:28:22,012 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732436902008"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732436902008"}]},"ts":"1732436902008"} 2024-11-24T08:28:22,018 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
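While CreateTableProcedure (pid=4) walks through its states, the client repeatedly asks the master whether the procedure is done ("Checking to see if procedure is done pid=4"). A minimal client-side equivalent of that readiness check, assuming the same table name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableAvailableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Becomes true once the region has been added to hbase:meta and assigned.
      System.out.println("exists=" + admin.tableExists(tn)
          + " available=" + admin.isTableAvailable(tn));
    }
  }
}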
2024-11-24T08:28:22,020 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:28:22,023 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732436902020"}]},"ts":"1732436902020"} 2024-11-24T08:28:22,028 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-24T08:28:22,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=88d8569a8c7ac25de3146945b69e3fbd, ASSIGN}] 2024-11-24T08:28:22,032 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=88d8569a8c7ac25de3146945b69e3fbd, ASSIGN 2024-11-24T08:28:22,034 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=88d8569a8c7ac25de3146945b69e3fbd, ASSIGN; state=OFFLINE, location=30c28c82771d,40485,1732436899799; forceNewPlan=false, retain=false 2024-11-24T08:28:22,185 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=88d8569a8c7ac25de3146945b69e3fbd, regionState=OPENING, regionLocation=30c28c82771d,40485,1732436899799 2024-11-24T08:28:22,190 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=88d8569a8c7ac25de3146945b69e3fbd, ASSIGN because future has completed 2024-11-24T08:28:22,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 88d8569a8c7ac25de3146945b69e3fbd, server=30c28c82771d,40485,1732436899799}] 2024-11-24T08:28:22,351 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 
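The TransitRegionStateProcedure/OpenRegionProcedure pair above assigns the single region of the new table to 30c28c82771d,40485. A minimal sketch listing that region from the client, where the encoded name should match 88d8569a8c7ac25de3146945b69e3fbd seen in the log:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions =
          admin.getRegions(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
      for (RegionInfo ri : regions) {
        // Region name plus encoded name, e.g. ... / 88d8569a8c7ac25de3146945b69e3fbd
        System.out.println(ri.getRegionNameAsString() + " / " + ri.getEncodedName());
      }
    }
  }
}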
2024-11-24T08:28:22,352 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 88d8569a8c7ac25de3146945b69e3fbd, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:28:22,352 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,352 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:28:22,352 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,352 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,355 INFO [StoreOpener-88d8569a8c7ac25de3146945b69e3fbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,357 INFO [StoreOpener-88d8569a8c7ac25de3146945b69e3fbd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 88d8569a8c7ac25de3146945b69e3fbd columnFamilyName info 2024-11-24T08:28:22,357 DEBUG [StoreOpener-88d8569a8c7ac25de3146945b69e3fbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:28:22,358 INFO [StoreOpener-88d8569a8c7ac25de3146945b69e3fbd-1 {}] regionserver.HStore(327): Store=88d8569a8c7ac25de3146945b69e3fbd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:28:22,358 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,360 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,360 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,361 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,361 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,363 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,367 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:28:22,368 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 88d8569a8c7ac25de3146945b69e3fbd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699427, jitterRate=-0.11063367128372192}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:28:22,368 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:22,369 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 88d8569a8c7ac25de3146945b69e3fbd: Running coprocessor pre-open hook at 1732436902353Writing region info on filesystem at 1732436902353Initializing all the Stores at 1732436902354 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436902354Cleaning up temporary data from old regions at 1732436902361 (+7 ms)Running coprocessor post-open hooks at 1732436902368 (+7 ms)Region opened successfully at 1732436902369 (+1 ms) 2024-11-24T08:28:22,371 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd., pid=6, masterSystemTime=1732436902345 2024-11-24T08:28:22,374 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:28:22,374 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:28:22,376 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=88d8569a8c7ac25de3146945b69e3fbd, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,40485,1732436899799 2024-11-24T08:28:22,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 88d8569a8c7ac25de3146945b69e3fbd, server=30c28c82771d,40485,1732436899799 because future has completed 2024-11-24T08:28:22,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:28:22,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 88d8569a8c7ac25de3146945b69e3fbd, server=30c28c82771d,40485,1732436899799 in 192 msec 2024-11-24T08:28:22,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:28:22,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=88d8569a8c7ac25de3146945b69e3fbd, ASSIGN in 357 msec 2024-11-24T08:28:22,392 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:28:22,392 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732436902392"}]},"ts":"1732436902392"} 2024-11-24T08:28:22,395 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-24T08:28:22,397 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:28:22,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 488 msec 2024-11-24T08:28:27,003 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T08:28:27,053 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:28:27,055 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-24T08:28:29,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:28:29,523 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:28:29,525 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:28:29,525 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T08:28:29,526 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:28:29,526 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:28:29,526 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:28:29,526 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T08:28:31,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38311 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:28:31,998 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-24T08:28:32,001 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-24T08:28:32,007 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:28:32,008 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 
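The "Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, ... with caching=100" line is the client enumerating the table's regions from hbase:meta. A minimal sketch of an equivalent scan done directly against hbase:meta; the stop condition here is a simple prefix check and is an illustration, not the accessor the client actually uses:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("TestLogRolling-testSlowSyncLogRolling,,"))
          .setCaching(100); // matches "with caching=100" in the log
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          String row = Bytes.toStringBinary(r.getRow());
          if (!row.startsWith("TestLogRolling-testSlowSyncLogRolling,")) break;
          System.out.println(row);
        }
      }
    }
  }
}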
2024-11-24T08:28:32,009 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436912008 2024-11-24T08:28:32,017 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:32,017 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:32,017 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:32,018 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:32,018 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:32,018 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436901060 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436912008 2024-11-24T08:28:32,019 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:35725:35725)] 2024-11-24T08:28:32,020 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436901060 is not closed yet, will try archiving it next time 2024-11-24T08:28:32,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741833_1009 (size=451) 2024-11-24T08:28:32,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741833_1009 (size=451) 2024-11-24T08:28:32,027 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436901060 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436901060 2024-11-24T08:28:32,029 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd., hostname=30c28c82771d,40485,1732436899799, seqNum=2] 2024-11-24T08:28:44,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40485 {}] regionserver.HRegion(8855): Flush requested on 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:44,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 88d8569a8c7ac25de3146945b69e3fbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:28:44,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/dcb76782dc9549e1b8a5ade463e3620a is 1080, key is row0001/info:/1732436912032/Put/seqid=0 2024-11-24T08:28:44,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741838_1014 (size=12509) 2024-11-24T08:28:44,135 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741838_1014 (size=12509) 2024-11-24T08:28:44,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/dcb76782dc9549e1b8a5ade463e3620a 2024-11-24T08:28:44,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/dcb76782dc9549e1b8a5ade463e3620a as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a 2024-11-24T08:28:44,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T08:28:44,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 88d8569a8c7ac25de3146945b69e3fbd in 145ms, sequenceid=11, compaction requested=false 2024-11-24T08:28:44,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 88d8569a8c7ac25de3146945b69e3fbd: 2024-11-24T08:28:48,167 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-24T08:28:52,074 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436932074 2024-11-24T08:28:52,283 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:52,283 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:52,283 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:52,284 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:52,284 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:52,284 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:28:52,284 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436912008 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436932074 2024-11-24T08:28:52,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:35725:35725)] 2024-11-24T08:28:52,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436912008 is not closed yet, will try archiving it next time 2024-11-24T08:28:52,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741837_1013 (size=12399) 2024-11-24T08:28:52,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741837_1013 (size=12399) 2024-11-24T08:28:52,490 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:54,695 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:56,899 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:59,103 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:59,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40485 {}] regionserver.HRegion(8855): Flush requested on 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:28:59,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 88d8569a8c7ac25de3146945b69e3fbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:28:59,305 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:59,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/f38f88b3aad644efa48cb5c7ca34bede is 1080, key is row0008/info:/1732436926063/Put/seqid=0 2024-11-24T08:28:59,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741840_1016 (size=12509) 2024-11-24T08:28:59,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741840_1016 (size=12509) 2024-11-24T08:28:59,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/f38f88b3aad644efa48cb5c7ca34bede 2024-11-24T08:28:59,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/f38f88b3aad644efa48cb5c7ca34bede as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f38f88b3aad644efa48cb5c7ca34bede 2024-11-24T08:28:59,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f38f88b3aad644efa48cb5c7ca34bede, entries=7, sequenceid=21, filesize=12.2 K 2024-11-24T08:28:59,947 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:28:59,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 88d8569a8c7ac25de3146945b69e3fbd in 
844ms, sequenceid=21, compaction requested=false 2024-11-24T08:28:59,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 88d8569a8c7ac25de3146945b69e3fbd: 2024-11-24T08:28:59,948 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-24T08:28:59,948 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:28:59,949 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a because midkey is the same as first or last row 2024-11-24T08:29:01,308 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:29:01,771 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:29:01,771 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T08:29:03,512 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:29:03,515 WARN [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:29:03,516 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C40485%2C1732436899799:(num 1732436932074) roll requested 2024-11-24T08:29:03,516 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436943516 2024-11-24T08:29:03,724 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK]] 2024-11-24T08:29:03,725 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:03,725 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:03,725 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:03,725 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:03,725 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-24T08:29:03,725 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436932074 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436943516 2024-11-24T08:29:03,726 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:29:03,726 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436932074 is not closed yet, will try archiving it next time 2024-11-24T08:29:03,727 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436912008 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436912008 2024-11-24T08:29:03,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741839_1015 (size=7739) 2024-11-24T08:29:03,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741839_1015 (size=7739) 2024-11-24T08:29:05,717 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:07,352 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 88d8569a8c7ac25de3146945b69e3fbd, had cached 0 bytes from a total of 25018 2024-11-24T08:29:07,921 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:10,125 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:12,329 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:14,331 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:29:14,332 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436954332 2024-11-24T08:29:18,167 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:29:19,340 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:19,343 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:19,343 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C40485%2C1732436899799:(num 1732436954332) roll requested 2024-11-24T08:29:19,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:19,344 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:19,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:19,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:19,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:19,344 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436943516 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436954332 2024-11-24T08:29:19,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741841_1017 (size=4753) 2024-11-24T08:29:19,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741841_1017 (size=4753) 2024-11-24T08:29:19,353 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:29:19,353 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436943516 is not closed yet, will try archiving it next time 2024-11-24T08:29:19,353 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436959353 2024-11-24T08:29:24,356 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:24,356 WARN [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40485 {}] regionserver.HRegion(8855): Flush requested on 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:29:24,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 88d8569a8c7ac25de3146945b69e3fbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:29:24,362 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:24,362 WARN [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:26,357 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:29:29,359 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:29,359 WARN [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:29,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:29,359 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:29,359 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:29,360 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:29,360 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:29,360 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436954332 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436959353 2024-11-24T08:29:29,361 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:29:29,361 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436954332 is not closed yet, will try archiving it next time 2024-11-24T08:29:29,361 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C40485%2C1732436899799:(num 1732436959353) roll requested 2024-11-24T08:29:29,362 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436969361 2024-11-24T08:29:29,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741842_1018 (size=1569) 2024-11-24T08:29:29,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741842_1018 (size=1569) 2024-11-24T08:29:29,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/d8028667f68c4615a76f6cbe3bbcac7b is 1080, key is row0015/info:/1732436941106/Put/seqid=0 2024-11-24T08:29:29,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741844_1020 (size=12509) 2024-11-24T08:29:29,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741844_1020 (size=12509) 2024-11-24T08:29:29,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/d8028667f68c4615a76f6cbe3bbcac7b 2024-11-24T08:29:29,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/d8028667f68c4615a76f6cbe3bbcac7b as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/d8028667f68c4615a76f6cbe3bbcac7b 2024-11-24T08:29:29,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/d8028667f68c4615a76f6cbe3bbcac7b, entries=7, sequenceid=31, filesize=12.2 K 2024-11-24T08:29:34,371 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:34,371 WARN [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:34,396 INFO [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:34,396 WARN [FSHLog-0-hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747-prefix:30c28c82771d,40485,1732436899799 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45027,DS-5626af90-b1b6-41ec-aa3b-9ca43907fdb3,DISK], DatanodeInfoWithStorage[127.0.0.1:45079,DS-9fb564a3-ac8d-4ea3-b0a8-512e672d7d9c,DISK]] 2024-11-24T08:29:34,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 88d8569a8c7ac25de3146945b69e3fbd in 10040ms, sequenceid=31, compaction requested=true 2024-11-24T08:29:34,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 88d8569a8c7ac25de3146945b69e3fbd: 2024-11-24T08:29:34,397 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,397 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,397 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-24T08:29:34,397 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,397 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:29:34,397 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a because midkey is the same as first or last row 2024-11-24T08:29:34,397 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,397 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,397 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436959353 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436969361 2024-11-24T08:29:34,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 88d8569a8c7ac25de3146945b69e3fbd:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-24T08:29:34,399 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:29:34,399 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436959353 is not closed yet, will try archiving it next time 2024-11-24T08:29:34,399 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C40485%2C1732436899799:(num 1732436969361) roll requested 2024-11-24T08:29:34,400 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436932074 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436932074 2024-11-24T08:29:34,400 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436974400 2024-11-24T08:29:34,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741843_1019 (size=438) 2024-11-24T08:29:34,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741843_1019 (size=438) 2024-11-24T08:29:34,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:29:34,402 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:29:34,402 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436943516 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436943516 2024-11-24T08:29:34,404 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436954332 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436954332 2024-11-24T08:29:34,405 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:29:34,405 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436959353 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436959353 2024-11-24T08:29:34,407 DEBUG 
[RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.HStore(1541): 88d8569a8c7ac25de3146945b69e3fbd/info is initiating minor compaction (all files) 2024-11-24T08:29:34,407 INFO [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 88d8569a8c7ac25de3146945b69e3fbd/info in TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:29:34,408 INFO [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f38f88b3aad644efa48cb5c7ca34bede, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/d8028667f68c4615a76f6cbe3bbcac7b] into tmpdir=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp, totalSize=36.6 K 2024-11-24T08:29:34,410 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] compactions.Compactor(225): Compacting dcb76782dc9549e1b8a5ade463e3620a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732436912032 2024-11-24T08:29:34,410 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] compactions.Compactor(225): Compacting f38f88b3aad644efa48cb5c7ca34bede, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732436926063 2024-11-24T08:29:34,411 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8028667f68c4615a76f6cbe3bbcac7b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732436941106 2024-11-24T08:29:34,427 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,428 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,428 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,428 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,428 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,429 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436969361 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436974400 2024-11-24T08:29:34,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741845_1021 (size=93) 2024-11-24T08:29:34,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741845_1021 (size=93) 2024-11-24T08:29:34,432 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436969361 to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs/30c28c82771d%2C40485%2C1732436899799.1732436969361 2024-11-24T08:29:34,445 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:35725:35725)] 2024-11-24T08:29:34,446 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40485%2C1732436899799.1732436974445 2024-11-24T08:29:34,459 INFO [RS:0;30c28c82771d:40485-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 88d8569a8c7ac25de3146945b69e3fbd#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:29:34,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,460 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,460 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,460 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:34,460 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/7ee0ebae4c1b4fb38edcaf4efaed877e is 1080, key is row0001/info:/1732436912032/Put/seqid=0 2024-11-24T08:29:34,460 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436974400 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/WALs/30c28c82771d,40485,1732436899799/30c28c82771d%2C40485%2C1732436899799.1732436974445 2024-11-24T08:29:34,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741846_1022 (size=1258) 2024-11-24T08:29:34,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741846_1022 (size=1258) 2024-11-24T08:29:34,464 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35725:35725),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-24T08:29:34,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741848_1024 (size=27710) 2024-11-24T08:29:34,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741848_1024 (size=27710) 2024-11-24T08:29:34,484 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/7ee0ebae4c1b4fb38edcaf4efaed877e as 
hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/7ee0ebae4c1b4fb38edcaf4efaed877e 2024-11-24T08:29:34,503 INFO [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 88d8569a8c7ac25de3146945b69e3fbd/info of 88d8569a8c7ac25de3146945b69e3fbd into 7ee0ebae4c1b4fb38edcaf4efaed877e(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:29:34,504 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 88d8569a8c7ac25de3146945b69e3fbd: 2024-11-24T08:29:34,507 INFO [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd., storeName=88d8569a8c7ac25de3146945b69e3fbd/info, priority=13, startTime=1732436974398; duration=0sec 2024-11-24T08:29:34,507 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T08:29:34,507 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:29:34,507 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/7ee0ebae4c1b4fb38edcaf4efaed877e because midkey is the same as first or last row 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/7ee0ebae4c1b4fb38edcaf4efaed877e because midkey is the same as first or last row 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/7ee0ebae4c1b4fb38edcaf4efaed877e because midkey is the same as first or last row 2024-11-24T08:29:34,508 DEBUG [RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:29:34,508 DEBUG 
[RS:0;30c28c82771d:40485-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 88d8569a8c7ac25de3146945b69e3fbd:info 2024-11-24T08:29:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40485 {}] regionserver.HRegion(8855): Flush requested on 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:29:46,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 88d8569a8c7ac25de3146945b69e3fbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:29:46,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/f396892cefe5474497b4f0edb68d3371 is 1080, key is row0022/info:/1732436974447/Put/seqid=0 2024-11-24T08:29:46,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741849_1025 (size=12509) 2024-11-24T08:29:46,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741849_1025 (size=12509) 2024-11-24T08:29:46,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/f396892cefe5474497b4f0edb68d3371 2024-11-24T08:29:46,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/f396892cefe5474497b4f0edb68d3371 as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f396892cefe5474497b4f0edb68d3371 2024-11-24T08:29:46,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f396892cefe5474497b4f0edb68d3371, entries=7, sequenceid=42, filesize=12.2 K 2024-11-24T08:29:46,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 88d8569a8c7ac25de3146945b69e3fbd in 34ms, sequenceid=42, compaction requested=false 2024-11-24T08:29:46,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 88d8569a8c7ac25de3146945b69e3fbd: 2024-11-24T08:29:46,508 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-24T08:29:46,508 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:29:46,508 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/7ee0ebae4c1b4fb38edcaf4efaed877e because midkey is the same as first or last 
row 2024-11-24T08:29:48,167 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:29:52,353 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 88d8569a8c7ac25de3146945b69e3fbd, had cached 0 bytes from a total of 40219 2024-11-24T08:29:54,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:29:54,486 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:29:54,487 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:29:54,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:54,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:54,494 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:29:54,494 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=272026373, stopped=false 2024-11-24T08:29:54,494 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,38311,1732436899093 2024-11-24T08:29:54,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:29:54,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:54,496 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:29:54,497 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:29:54,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:29:54,497 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:29:54,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:54,497 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:29:54,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:29:54,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:54,498 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:29:54,498 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,40485,1732436899799' ***** 2024-11-24T08:29:54,498 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:29:54,499 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:29:54,499 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:29:54,499 INFO [RS:0;30c28c82771d:40485 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:29:54,499 INFO [RS:0;30c28c82771d:40485 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:29:54,500 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(3091): Received CLOSE for 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:29:54,501 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,40485,1732436899799 2024-11-24T08:29:54,501 INFO [RS:0;30c28c82771d:40485 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:29:54,501 INFO [RS:0;30c28c82771d:40485 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:40485. 
2024-11-24T08:29:54,501 DEBUG [RS:0;30c28c82771d:40485 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:29:54,501 DEBUG [RS:0;30c28c82771d:40485 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:54,501 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 88d8569a8c7ac25de3146945b69e3fbd, disabling compactions & flushes 2024-11-24T08:29:54,502 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:29:54,502 INFO [RS:0;30c28c82771d:40485 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:29:54,502 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:29:54,502 INFO [RS:0;30c28c82771d:40485 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:29:54,502 INFO [RS:0;30c28c82771d:40485 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:29:54,502 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. after waiting 0 ms 2024-11-24T08:29:54,502 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 
2024-11-24T08:29:54,502 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:29:54,502 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 88d8569a8c7ac25de3146945b69e3fbd 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T08:29:54,502 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:29:54,502 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:29:54,503 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 88d8569a8c7ac25de3146945b69e3fbd=TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.} 2024-11-24T08:29:54,503 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:29:54,503 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:29:54,503 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:29:54,503 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:29:54,503 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 88d8569a8c7ac25de3146945b69e3fbd 2024-11-24T08:29:54,503 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-24T08:29:54,511 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/929129f8cbf943aa94c5bddafddaefe8 is 1080, key is row0029/info:/1732436988475/Put/seqid=0 2024-11-24T08:29:54,532 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/info/8b53b63008124ee9b8c5838a66b86660 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd./info:regioninfo/1732436902375/Put/seqid=0 2024-11-24T08:29:54,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741850_1026 (size=8193) 2024-11-24T08:29:54,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741850_1026 (size=8193) 2024-11-24T08:29:54,540 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), 
to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/929129f8cbf943aa94c5bddafddaefe8 2024-11-24T08:29:54,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741851_1027 (size=7016) 2024-11-24T08:29:54,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741851_1027 (size=7016) 2024-11-24T08:29:54,551 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/info/8b53b63008124ee9b8c5838a66b86660 2024-11-24T08:29:54,554 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/.tmp/info/929129f8cbf943aa94c5bddafddaefe8 as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/929129f8cbf943aa94c5bddafddaefe8 2024-11-24T08:29:54,566 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/929129f8cbf943aa94c5bddafddaefe8, entries=3, sequenceid=48, filesize=8.0 K 2024-11-24T08:29:54,569 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 88d8569a8c7ac25de3146945b69e3fbd in 66ms, sequenceid=48, compaction requested=true 2024-11-24T08:29:54,573 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f38f88b3aad644efa48cb5c7ca34bede, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/d8028667f68c4615a76f6cbe3bbcac7b] to archive 2024-11-24T08:29:54,577 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T08:29:54,581 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/archive/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/dcb76782dc9549e1b8a5ade463e3620a 2024-11-24T08:29:54,583 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f38f88b3aad644efa48cb5c7ca34bede to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/archive/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/f38f88b3aad644efa48cb5c7ca34bede 2024-11-24T08:29:54,597 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/d8028667f68c4615a76f6cbe3bbcac7b to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/archive/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/info/d8028667f68c4615a76f6cbe3bbcac7b 2024-11-24T08:29:54,618 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/ns/ad74ea9a04244546923f138af3d2d4f5 is 43, key is default/ns:d/1732436901618/Put/seqid=0 2024-11-24T08:29:54,615 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30c28c82771d:38311 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-24T08:29:54,621 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [dcb76782dc9549e1b8a5ade463e3620a=12509, f38f88b3aad644efa48cb5c7ca34bede=12509, d8028667f68c4615a76f6cbe3bbcac7b=12509] 2024-11-24T08:29:54,637 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/default/TestLogRolling-testSlowSyncLogRolling/88d8569a8c7ac25de3146945b69e3fbd/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-24T08:29:54,641 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 2024-11-24T08:29:54,641 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 88d8569a8c7ac25de3146945b69e3fbd: Waiting for close lock at 1732436994501Running coprocessor pre-close hooks at 1732436994501Disabling compacts and flushes for region at 1732436994501Disabling writes for close at 1732436994502 (+1 ms)Obtaining lock to block concurrent updates at 1732436994502Preparing flush snapshotting stores in 88d8569a8c7ac25de3146945b69e3fbd at 1732436994502Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732436994503 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. at 1732436994504 (+1 ms)Flushing 88d8569a8c7ac25de3146945b69e3fbd/info: creating writer at 1732436994504Flushing 88d8569a8c7ac25de3146945b69e3fbd/info: appending metadata at 1732436994510 (+6 ms)Flushing 88d8569a8c7ac25de3146945b69e3fbd/info: closing flushed file at 1732436994510Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14bf4ea7: reopening flushed file at 1732436994552 (+42 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 88d8569a8c7ac25de3146945b69e3fbd in 66ms, sequenceid=48, compaction requested=true at 1732436994569 (+17 ms)Writing region close event to WAL at 1732436994625 (+56 ms)Running coprocessor post-close hooks at 1732436994639 (+14 ms)Closed at 1732436994641 (+2 ms) 2024-11-24T08:29:54,642 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732436901900.88d8569a8c7ac25de3146945b69e3fbd. 
2024-11-24T08:29:54,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741852_1028 (size=5153) 2024-11-24T08:29:54,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741852_1028 (size=5153) 2024-11-24T08:29:54,703 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:29:54,903 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:29:54,907 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:29:54,907 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:29:54,908 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:29:55,046 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/ns/ad74ea9a04244546923f138af3d2d4f5 2024-11-24T08:29:55,084 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/table/93935dad80414d96a64e83ec4a73919c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732436902392/Put/seqid=0 2024-11-24T08:29:55,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741853_1029 (size=5396) 2024-11-24T08:29:55,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741853_1029 (size=5396) 2024-11-24T08:29:55,091 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/table/93935dad80414d96a64e83ec4a73919c 2024-11-24T08:29:55,099 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/info/8b53b63008124ee9b8c5838a66b86660 as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/info/8b53b63008124ee9b8c5838a66b86660 2024-11-24T08:29:55,104 DEBUG [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:29:55,107 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/info/8b53b63008124ee9b8c5838a66b86660, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T08:29:55,108 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/ns/ad74ea9a04244546923f138af3d2d4f5 as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/ns/ad74ea9a04244546923f138af3d2d4f5 2024-11-24T08:29:55,116 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/ns/ad74ea9a04244546923f138af3d2d4f5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T08:29:55,117 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/.tmp/table/93935dad80414d96a64e83ec4a73919c as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/table/93935dad80414d96a64e83ec4a73919c 2024-11-24T08:29:55,127 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/table/93935dad80414d96a64e83ec4a73919c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T08:29:55,129 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 625ms, sequenceid=11, compaction requested=false 2024-11-24T08:29:55,135 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T08:29:55,136 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:29:55,136 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:29:55,136 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732436994502Running coprocessor pre-close hooks at 1732436994502Disabling compacts and flushes for region at 1732436994502Disabling writes for close at 1732436994503 (+1 ms)Obtaining lock to block concurrent updates at 1732436994503Preparing flush snapshotting stores in 1588230740 at 1732436994503Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732436994504 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732436994505 (+1 ms)Flushing 1588230740/info: creating writer at 1732436994505Flushing 1588230740/info: appending metadata at 1732436994531 (+26 ms)Flushing 1588230740/info: closing flushed file at 1732436994532 (+1 ms)Flushing 1588230740/ns: creating writer at 1732436994566 (+34 ms)Flushing 1588230740/ns: appending metadata at 1732436994617 (+51 ms)Flushing 1588230740/ns: closing flushed file at 
1732436994617Flushing 1588230740/table: creating writer at 1732436995064 (+447 ms)Flushing 1588230740/table: appending metadata at 1732436995083 (+19 ms)Flushing 1588230740/table: closing flushed file at 1732436995083Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71979eb8: reopening flushed file at 1732436995098 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a5fd345: reopening flushed file at 1732436995107 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ceca189: reopening flushed file at 1732436995116 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 625ms, sequenceid=11, compaction requested=false at 1732436995129 (+13 ms)Writing region close event to WAL at 1732436995130 (+1 ms)Running coprocessor post-close hooks at 1732436995136 (+6 ms)Closed at 1732436995136 2024-11-24T08:29:55,137 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:29:55,304 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,40485,1732436899799; all regions closed. 2024-11-24T08:29:55,306 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,306 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,306 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,306 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,306 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741834_1010 (size=3066) 2024-11-24T08:29:55,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741834_1010 (size=3066) 2024-11-24T08:29:55,313 DEBUG [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs 2024-11-24T08:29:55,313 INFO [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C40485%2C1732436899799.meta:.meta(num 1732436901469) 2024-11-24T08:29:55,314 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,314 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,314 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,314 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,314 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741847_1023 (size=12695) 2024-11-24T08:29:55,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741847_1023 (size=12695) 2024-11-24T08:29:55,321 DEBUG [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/oldWALs 2024-11-24T08:29:55,321 INFO [RS:0;30c28c82771d:40485 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C40485%2C1732436899799:(num 1732436974445) 2024-11-24T08:29:55,321 DEBUG [RS:0;30c28c82771d:40485 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-24T08:29:55,321 INFO [RS:0;30c28c82771d:40485 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:29:55,321 INFO [RS:0;30c28c82771d:40485 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:29:55,322 INFO [RS:0;30c28c82771d:40485 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:29:55,322 INFO [RS:0;30c28c82771d:40485 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:29:55,322 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:29:55,322 INFO [RS:0;30c28c82771d:40485 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40485 2024-11-24T08:29:55,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,40485,1732436899799 2024-11-24T08:29:55,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:29:55,327 INFO [RS:0;30c28c82771d:40485 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:29:55,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,40485,1732436899799] 2024-11-24T08:29:55,331 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,40485,1732436899799 already deleted, retry=false 2024-11-24T08:29:55,331 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,40485,1732436899799 expired; onlineServers=0 2024-11-24T08:29:55,331 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,38311,1732436899093' ***** 2024-11-24T08:29:55,331 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:29:55,331 INFO [M:0;30c28c82771d:38311 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:29:55,332 INFO [M:0;30c28c82771d:38311 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:29:55,332 DEBUG [M:0;30c28c82771d:38311 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:29:55,332 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:29:55,332 DEBUG [M:0;30c28c82771d:38311 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:29:55,332 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732436900753 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732436900753,5,FailOnTimeoutGroup] 2024-11-24T08:29:55,332 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732436900750 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732436900750,5,FailOnTimeoutGroup] 2024-11-24T08:29:55,332 INFO [M:0;30c28c82771d:38311 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:29:55,332 INFO [M:0;30c28c82771d:38311 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:29:55,332 DEBUG [M:0;30c28c82771d:38311 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:29:55,332 INFO [M:0;30c28c82771d:38311 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:29:55,332 INFO [M:0;30c28c82771d:38311 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:29:55,333 INFO [M:0;30c28c82771d:38311 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:29:55,333 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:29:55,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:29:55,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:55,334 DEBUG [M:0;30c28c82771d:38311 {}] zookeeper.ZKUtil(347): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:29:55,334 WARN [M:0;30c28c82771d:38311 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:29:55,335 INFO [M:0;30c28c82771d:38311 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/.lastflushedseqids 2024-11-24T08:29:55,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741854_1030 (size=130) 2024-11-24T08:29:55,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741854_1030 (size=130) 2024-11-24T08:29:55,349 INFO [M:0;30c28c82771d:38311 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:29:55,349 INFO [M:0;30c28c82771d:38311 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:29:55,349 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:29:55,349 INFO [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:55,349 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:55,349 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:29:55,349 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:55,349 INFO [M:0;30c28c82771d:38311 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-24T08:29:55,368 DEBUG [M:0;30c28c82771d:38311 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba819ad41fb04bff83e679044ee8c55c is 82, key is hbase:meta,,1/info:regioninfo/1732436901544/Put/seqid=0 2024-11-24T08:29:55,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741855_1031 (size=5672) 2024-11-24T08:29:55,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741855_1031 (size=5672) 2024-11-24T08:29:55,378 INFO [M:0;30c28c82771d:38311 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba819ad41fb04bff83e679044ee8c55c 2024-11-24T08:29:55,405 DEBUG [M:0;30c28c82771d:38311 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/18ded2a001a54fc390f74f1f346b5d56 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732436902399/Put/seqid=0 2024-11-24T08:29:55,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741856_1032 (size=6247) 2024-11-24T08:29:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741856_1032 (size=6247) 2024-11-24T08:29:55,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:55,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40485-0x10149171c430001, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:55,430 INFO [RS:0;30c28c82771d:40485 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:29:55,430 INFO [RS:0;30c28c82771d:40485 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,40485,1732436899799; zookeeper connection closed. 
2024-11-24T08:29:55,430 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7368c979 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7368c979 2024-11-24T08:29:55,431 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:29:55,814 INFO [M:0;30c28c82771d:38311 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/18ded2a001a54fc390f74f1f346b5d56 2024-11-24T08:29:55,823 INFO [M:0;30c28c82771d:38311 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 18ded2a001a54fc390f74f1f346b5d56 2024-11-24T08:29:55,853 DEBUG [M:0;30c28c82771d:38311 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/77d8adf168db4a5cb4ba2df457a495ae is 69, key is 30c28c82771d,40485,1732436899799/rs:state/1732436900830/Put/seqid=0 2024-11-24T08:29:55,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741857_1033 (size=5156) 2024-11-24T08:29:55,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741857_1033 (size=5156) 2024-11-24T08:29:55,871 INFO [M:0;30c28c82771d:38311 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/77d8adf168db4a5cb4ba2df457a495ae 2024-11-24T08:29:55,900 DEBUG [M:0;30c28c82771d:38311 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97b74d9fb6b041068fa1291801dd8441 is 52, key is load_balancer_on/state:d/1732436901879/Put/seqid=0 2024-11-24T08:29:55,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741858_1034 (size=5056) 2024-11-24T08:29:55,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741858_1034 (size=5056) 2024-11-24T08:29:55,912 INFO [M:0;30c28c82771d:38311 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97b74d9fb6b041068fa1291801dd8441 2024-11-24T08:29:55,921 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba819ad41fb04bff83e679044ee8c55c as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba819ad41fb04bff83e679044ee8c55c 
2024-11-24T08:29:55,931 INFO [M:0;30c28c82771d:38311 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba819ad41fb04bff83e679044ee8c55c, entries=8, sequenceid=59, filesize=5.5 K 2024-11-24T08:29:55,933 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/18ded2a001a54fc390f74f1f346b5d56 as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/18ded2a001a54fc390f74f1f346b5d56 2024-11-24T08:29:55,942 INFO [M:0;30c28c82771d:38311 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 18ded2a001a54fc390f74f1f346b5d56 2024-11-24T08:29:55,942 INFO [M:0;30c28c82771d:38311 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/18ded2a001a54fc390f74f1f346b5d56, entries=6, sequenceid=59, filesize=6.1 K 2024-11-24T08:29:55,944 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/77d8adf168db4a5cb4ba2df457a495ae as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/77d8adf168db4a5cb4ba2df457a495ae 2024-11-24T08:29:55,956 INFO [M:0;30c28c82771d:38311 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/77d8adf168db4a5cb4ba2df457a495ae, entries=1, sequenceid=59, filesize=5.0 K 2024-11-24T08:29:55,958 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97b74d9fb6b041068fa1291801dd8441 as hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/97b74d9fb6b041068fa1291801dd8441 2024-11-24T08:29:55,968 INFO [M:0;30c28c82771d:38311 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/97b74d9fb6b041068fa1291801dd8441, entries=1, sequenceid=59, filesize=4.9 K 2024-11-24T08:29:55,971 INFO [M:0;30c28c82771d:38311 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 621ms, sequenceid=59, compaction requested=false 2024-11-24T08:29:55,977 INFO [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:29:55,977 DEBUG [M:0;30c28c82771d:38311 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732436995349Disabling compacts and flushes for region at 1732436995349Disabling writes for close at 1732436995349Obtaining lock to block concurrent updates at 1732436995349Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732436995349Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732436995350 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732436995351 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732436995351Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732436995368 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732436995368Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732436995386 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732436995404 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732436995404Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732436995823 (+419 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732436995852 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732436995852Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732436995880 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732436995900 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732436995900Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a79102e: reopening flushed file at 1732436995920 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d870983: reopening flushed file at 1732436995931 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b22f9c2: reopening flushed file at 1732436995943 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fee4d6: reopening flushed file at 1732436995957 (+14 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 621ms, sequenceid=59, compaction requested=false at 1732436995971 (+14 ms)Writing region close event to WAL at 1732436995977 (+6 ms)Closed at 1732436995977 2024-11-24T08:29:55,978 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,978 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,979 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,979 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45027 is added to blk_1073741830_1006 (size=27973) 2024-11-24T08:29:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45079 is added to blk_1073741830_1006 (size=27973) 2024-11-24T08:29:55,989 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:29:55,989 INFO [M:0;30c28c82771d:38311 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:29:55,990 INFO [M:0;30c28c82771d:38311 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38311 2024-11-24T08:29:55,990 INFO [M:0;30c28c82771d:38311 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:29:56,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:56,092 INFO [M:0;30c28c82771d:38311 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:29:56,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38311-0x10149171c430000, quorum=127.0.0.1:52809, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:56,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:56,107 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:29:56,107 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:29:56,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:29:56,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir/,STOPPED} 2024-11-24T08:29:56,113 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:29:56,113 WARN [BP-447478019-172.17.0.2-1732436896116 heartbeating to localhost/127.0.0.1:36899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:29:56,113 WARN [BP-447478019-172.17.0.2-1732436896116 heartbeating to localhost/127.0.0.1:36899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-447478019-172.17.0.2-1732436896116 (Datanode Uuid c7ce254c-1eb9-492a-92f8-bb60a797fa6f) service to localhost/127.0.0.1:36899 2024-11-24T08:29:56,113 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:29:56,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data3/current/BP-447478019-172.17.0.2-1732436896116 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:56,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data4/current/BP-447478019-172.17.0.2-1732436896116 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:56,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:29:56,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:56,130 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:29:56,130 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:29:56,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:29:56,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir/,STOPPED} 2024-11-24T08:29:56,139 WARN [BP-447478019-172.17.0.2-1732436896116 heartbeating to localhost/127.0.0.1:36899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:29:56,139 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:29:56,139 WARN [BP-447478019-172.17.0.2-1732436896116 heartbeating to localhost/127.0.0.1:36899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-447478019-172.17.0.2-1732436896116 (Datanode Uuid ffb2e774-6d02-4471-ac2d-61106e0b6ff7) service to localhost/127.0.0.1:36899 2024-11-24T08:29:56,139 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:29:56,140 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data1/current/BP-447478019-172.17.0.2-1732436896116 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:56,141 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/cluster_85508b28-1dc5-893e-0cc9-827ba1015c20/data/data2/current/BP-447478019-172.17.0.2-1732436896116 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:56,141 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:29:56,158 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:29:56,159 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:29:56,159 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:29:56,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:29:56,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir/,STOPPED} 2024-11-24T08:29:56,169 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:29:56,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:29:56,219 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
master/30c28c82771d:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/30c28c82771d:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36899 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36899 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36899 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36899 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/30c28c82771d:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36899 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) 
app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5f8f8226 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36899 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36899 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36899 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=407 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=169 (was 185), ProcessCount=11 (was 11), AvailableMemoryMB=6678 (was 7077) 2024-11-24T08:29:56,237 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=407, MaxFileDescriptor=1048576, SystemLoadAverage=169, ProcessCount=11, AvailableMemoryMB=6678 2024-11-24T08:29:56,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:29:56,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.log.dir so I do NOT create it in target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd 2024-11-24T08:29:56,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28f02e70-d19e-9437-e061-7b41e1746916/hadoop.tmp.dir so I do NOT create it in target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd 2024-11-24T08:29:56,240 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5, deleteOnExit=true 2024-11-24T08:29:56,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:29:56,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/test.cache.data in system properties and HBase conf 2024-11-24T08:29:56,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:29:56,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:29:56,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:29:56,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:29:56,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:29:56,241 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:29:56,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:29:56,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:29:56,260 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:29:56,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:56,373 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:29:56,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:29:56,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:29:56,374 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:29:56,375 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:56,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@630e1a46{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:29:56,377 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d9de743{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:29:56,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bc50e3b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/java.io.tmpdir/jetty-localhost-33209-hadoop-hdfs-3_4_1-tests_jar-_-any-15156601295694691774/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:29:56,523 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8c19991{HTTP/1.1, (http/1.1)}{localhost:33209} 2024-11-24T08:29:56,523 INFO [Time-limited test {}] server.Server(415): Started @102646ms 2024-11-24T08:29:56,541 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:29:56,630 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:56,634 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:29:56,635 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:29:56,635 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:29:56,635 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:29:56,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3270c9ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:29:56,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4620cd8a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:29:56,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78fa6004{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/java.io.tmpdir/jetty-localhost-35499-hadoop-hdfs-3_4_1-tests_jar-_-any-16044570292906313747/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:56,756 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b5fac92{HTTP/1.1, (http/1.1)}{localhost:35499} 2024-11-24T08:29:56,756 INFO [Time-limited test {}] server.Server(415): Started @102879ms 2024-11-24T08:29:56,758 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:29:56,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:56,811 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:29:56,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:29:56,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:29:56,812 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:29:56,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c30f553{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:29:56,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ab06e68{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:29:56,879 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data2/current/BP-516107162-172.17.0.2-1732436996280/current, will proceed with Du for space computation calculation, 2024-11-24T08:29:56,879 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data1/current/BP-516107162-172.17.0.2-1732436996280/current, will proceed with Du for space computation calculation, 2024-11-24T08:29:56,901 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:29:56,904 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22c482d6b2208cf with lease ID 0x16680755494fbb7a: Processing first storage report for DS-21411850-b253-4613-a1bc-e7575de3bea8 from datanode DatanodeRegistration(127.0.0.1:43937, datanodeUuid=39f0a1cb-008a-4808-8a4c-950ae8a09812, infoPort=38251, infoSecurePort=0, ipcPort=35515, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280) 2024-11-24T08:29:56,904 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22c482d6b2208cf with lease ID 0x16680755494fbb7a: from storage DS-21411850-b253-4613-a1bc-e7575de3bea8 node DatanodeRegistration(127.0.0.1:43937, datanodeUuid=39f0a1cb-008a-4808-8a4c-950ae8a09812, infoPort=38251, infoSecurePort=0, ipcPort=35515, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:29:56,904 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22c482d6b2208cf with lease ID 0x16680755494fbb7a: Processing first storage report for DS-982edd14-cf1c-4f27-aa6e-29ea4b8f0e19 from datanode DatanodeRegistration(127.0.0.1:43937, datanodeUuid=39f0a1cb-008a-4808-8a4c-950ae8a09812, infoPort=38251, infoSecurePort=0, ipcPort=35515, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280) 2024-11-24T08:29:56,905 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22c482d6b2208cf with lease ID 0x16680755494fbb7a: from storage DS-982edd14-cf1c-4f27-aa6e-29ea4b8f0e19 node DatanodeRegistration(127.0.0.1:43937, datanodeUuid=39f0a1cb-008a-4808-8a4c-950ae8a09812, infoPort=38251, infoSecurePort=0, ipcPort=35515, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:29:56,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38d766c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/java.io.tmpdir/jetty-localhost-41329-hadoop-hdfs-3_4_1-tests_jar-_-any-12256261039188398462/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:56,939 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12686d25{HTTP/1.1, (http/1.1)}{localhost:41329} 2024-11-24T08:29:56,939 INFO [Time-limited test {}] server.Server(415): Started @103062ms 2024-11-24T08:29:56,941 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:29:57,063 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data3/current/BP-516107162-172.17.0.2-1732436996280/current, will proceed with Du for space computation calculation, 2024-11-24T08:29:57,063 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data4/current/BP-516107162-172.17.0.2-1732436996280/current, will proceed with Du for space computation calculation, 2024-11-24T08:29:57,084 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:29:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18e39b06b52398d0 with lease ID 0x16680755494fbb7b: Processing first storage report for DS-177f2635-16bc-49da-bb21-2584417e0afc from datanode DatanodeRegistration(127.0.0.1:45857, datanodeUuid=6d690175-a24f-4343-849b-c4eeecb9811f, infoPort=41465, infoSecurePort=0, ipcPort=38299, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280) 2024-11-24T08:29:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18e39b06b52398d0 with lease ID 0x16680755494fbb7b: from storage DS-177f2635-16bc-49da-bb21-2584417e0afc node DatanodeRegistration(127.0.0.1:45857, datanodeUuid=6d690175-a24f-4343-849b-c4eeecb9811f, infoPort=41465, infoSecurePort=0, ipcPort=38299, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:29:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18e39b06b52398d0 with lease ID 0x16680755494fbb7b: Processing first storage report for DS-f602974e-a23e-4dda-9a3b-fb0ab7d62d62 from datanode DatanodeRegistration(127.0.0.1:45857, datanodeUuid=6d690175-a24f-4343-849b-c4eeecb9811f, infoPort=41465, infoSecurePort=0, ipcPort=38299, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280) 2024-11-24T08:29:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18e39b06b52398d0 with lease ID 0x16680755494fbb7b: from storage DS-f602974e-a23e-4dda-9a3b-fb0ab7d62d62 node DatanodeRegistration(127.0.0.1:45857, datanodeUuid=6d690175-a24f-4343-849b-c4eeecb9811f, infoPort=41465, infoSecurePort=0, ipcPort=38299, storageInfo=lv=-57;cid=testClusterID;nsid=1742687450;c=1732436996280), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:29:57,174 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd 2024-11-24T08:29:57,177 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/zookeeper_0, clientPort=51977, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:29:57,178 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51977 2024-11-24T08:29:57,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:29:57,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:29:57,192 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c with version=8 2024-11-24T08:29:57,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:29:57,194 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:29:57,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:29:57,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:29:57,194 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:29:57,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:29:57,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:29:57,194 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:29:57,195 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:29:57,195 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40421 2024-11-24T08:29:57,197 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40421 connecting to ZooKeeper ensemble=127.0.0.1:51977 2024-11-24T08:29:57,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404210x0, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:29:57,207 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40421-0x10149189ec80000 connected 2024-11-24T08:29:57,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,229 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,231 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:29:57,231 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c, hbase.cluster.distributed=false 2024-11-24T08:29:57,233 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:29:57,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40421 2024-11-24T08:29:57,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40421 2024-11-24T08:29:57,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40421 2024-11-24T08:29:57,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40421 2024-11-24T08:29:57,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40421 2024-11-24T08:29:57,252 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:29:57,252 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:29:57,253 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35553 2024-11-24T08:29:57,254 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35553 connecting to ZooKeeper ensemble=127.0.0.1:51977 2024-11-24T08:29:57,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355530x0, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:29:57,263 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35553-0x10149189ec80001 connected 2024-11-24T08:29:57,263 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:29:57,264 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:29:57,265 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:29:57,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:29:57,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:29:57,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35553 2024-11-24T08:29:57,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35553 2024-11-24T08:29:57,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35553 2024-11-24T08:29:57,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35553 2024-11-24T08:29:57,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35553 
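[editor's note] The entries up to this point trace the test harness bringing up a local stack: a mini HDFS (namenode plus two datanodes behind jetty), a MiniZooKeeperCluster on client port 51977, and the RPC endpoints of one master (port 40421) and one regionserver (port 35553). Purely as an illustration of the kind of JUnit-style driver that produces such a startup sequence, a minimal sketch follows; the class and method names (HBaseTestingUtil, startMiniCluster, shutdownMiniCluster, getConfiguration) are assumptions based on the HBase test-utility API and are not taken from this log.

// Hypothetical driver, assuming the HBase 3.x test utility API; not part of this log.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Brings up a local HDFS, a MiniZooKeeperCluster, one HMaster and one RegionServer,
    // emitting startup entries similar to the ones logged above.
    util.startMiniCluster();
    try {
      System.out.println("ZK client port: "
          + util.getConfiguration().get("hbase.zookeeper.property.clientPort"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}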
2024-11-24T08:29:57,294 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:40421 2024-11-24T08:29:57,294 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:29:57,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:29:57,297 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:29:57,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,300 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:29:57,300 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,40421,1732436997194 from backup master directory 2024-11-24T08:29:57,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:29:57,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:29:57,301 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:29:57,302 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,307 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/hbase.id] with ID: b584536f-58c4-430b-9063-97a32737fd0a 2024-11-24T08:29:57,307 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/.tmp/hbase.id 2024-11-24T08:29:57,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:29:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:29:57,719 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/.tmp/hbase.id]:[hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/hbase.id] 2024-11-24T08:29:57,744 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:57,744 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:29:57,747 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
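[editor's note] The cluster ID just written (b584536f-58c4-430b-9063-97a32737fd0a) and the ZooKeeper ensemble 127.0.0.1:51977 are what a client would use to reach this mini cluster. As an illustration only, a hedged client-side sketch follows; the quorum host and port are copied from the log lines above, while the code itself (HBaseConfiguration, ConnectionFactory) is a generic HBase client usage example, not something executed by this test run.

// Illustrative client connection; quorum/port values copied from the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "51977"); // from the MiniZooKeeperCluster entry
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected=" + !conn.isClosed());
    }
  }
}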
2024-11-24T08:29:57,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:29:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:29:57,770 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:29:57,772 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:29:57,772 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:29:57,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:29:57,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:29:57,787 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store 2024-11-24T08:29:57,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:29:57,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:29:57,796 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:29:57,797 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:29:57,797 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:57,797 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:57,797 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:29:57,797 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:57,797 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:29:57,797 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732436997797Disabling compacts and flushes for region at 1732436997797Disabling writes for close at 1732436997797Writing region close event to WAL at 1732436997797Closed at 1732436997797 2024-11-24T08:29:57,798 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/.initializing 2024-11-24T08:29:57,798 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/WALs/30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,802 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C40421%2C1732436997194, suffix=, logDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/WALs/30c28c82771d,40421,1732436997194, archiveDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/oldWALs, maxLogs=10 2024-11-24T08:29:57,802 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40421%2C1732436997194.1732436997802 2024-11-24T08:29:57,807 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/WALs/30c28c82771d,40421,1732436997194/30c28c82771d%2C40421%2C1732436997194.1732436997802 2024-11-24T08:29:57,808 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41465:41465),(127.0.0.1/127.0.0.1:38251:38251)] 2024-11-24T08:29:57,809 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:29:57,809 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:29:57,809 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,809 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:29:57,813 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:57,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:29:57,815 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:29:57,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:29:57,818 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:29:57,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:29:57,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:29:57,821 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,822 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,822 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,823 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,823 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,824 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:29:57,825 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:29:57,828 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:29:57,829 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734960, jitterRate=-0.06545113027095795}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:29:57,830 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732436997810Initializing all the Stores at 1732436997811 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436997811Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436997811Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436997811Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436997811Cleaning up temporary data from old regions at 1732436997824 (+13 ms)Region opened successfully at 1732436997830 (+6 ms) 2024-11-24T08:29:57,830 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:29:57,834 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53b792e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:29:57,835 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:29:57,836 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:29:57,836 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:29:57,836 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:29:57,836 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:29:57,837 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:29:57,837 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:29:57,839 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:29:57,840 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:29:57,843 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:29:57,843 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:29:57,844 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:29:57,845 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:29:57,845 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:29:57,846 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:29:57,847 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:29:57,848 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:29:57,850 DEBUG 
[master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:29:57,852 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:29:57,854 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:29:57,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:29:57,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:29:57,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,856 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,40421,1732436997194, sessionid=0x10149189ec80000, setting cluster-up flag (Was=false) 2024-11-24T08:29:57,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,864 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:29:57,865 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:57,873 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:29:57,874 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,40421,1732436997194 2024-11-24T08:29:57,876 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:29:57,878 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:29:57,878 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:29:57,878 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:29:57,878 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,40421,1732436997194 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:29:57,880 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(746): ClusterId : b584536f-58c4-430b-9063-97a32737fd0a 2024-11-24T08:29:57,880 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:29:57,880 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:29:57,880 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:29:57,880 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:29:57,880 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:29:57,880 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:29:57,881 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,881 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:29:57,881 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,882 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732437027882 2024-11-24T08:29:57,882 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:29:57,882 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:29:57,882 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:29:57,882 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:29:57,883 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:29:57,883 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:29:57,883 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,883 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:29:57,883 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:29:57,883 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:29:57,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:29:57,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:29:57,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:29:57,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:29:57,884 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732436997884,5,FailOnTimeoutGroup] 2024-11-24T08:29:57,885 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:29:57,885 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:29:57,885 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732436997884,5,FailOnTimeoutGroup] 2024-11-24T08:29:57,885 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,885 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,885 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:29:57,885 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,885 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,885 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:29:57,887 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:29:57,887 DEBUG [RS:0;30c28c82771d:35553 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc9d309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:29:57,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:29:57,897 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:29:57,899 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:29:57,899 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c 2024-11-24T08:29:57,903 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:35553 2024-11-24T08:29:57,903 INFO [RS:0;30c28c82771d:35553 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:29:57,903 INFO [RS:0;30c28c82771d:35553 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:29:57,904 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:29:57,904 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,40421,1732436997194 with port=35553, startcode=1732436997251 2024-11-24T08:29:57,905 DEBUG [RS:0;30c28c82771d:35553 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:29:57,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:29:57,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:29:57,908 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43467, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:29:57,909 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40421 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,35553,1732436997251 2024-11-24T08:29:57,909 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40421 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,35553,1732436997251 2024-11-24T08:29:57,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:29:57,911 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c 2024-11-24T08:29:57,911 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40285 2024-11-24T08:29:57,911 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:29:57,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:29:57,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:29:57,913 DEBUG [RS:0;30c28c82771d:35553 {}] zookeeper.ZKUtil(111): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,35553,1732436997251 2024-11-24T08:29:57,913 WARN [RS:0;30c28c82771d:35553 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:29:57,914 INFO [RS:0;30c28c82771d:35553 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:29:57,914 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/WALs/30c28c82771d,35553,1732436997251 2024-11-24T08:29:57,914 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,35553,1732436997251] 2024-11-24T08:29:57,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:29:57,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:57,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:29:57,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:29:57,917 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:57,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:29:57,918 INFO [RS:0;30c28c82771d:35553 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:29:57,919 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:29:57,919 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:57,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:29:57,922 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:29:57,922 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:57,922 INFO [RS:0;30c28c82771d:35553 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:29:57,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:57,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:29:57,923 INFO [RS:0;30c28c82771d:35553 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off 
peak: unlimited, tuning period: 60000 ms 2024-11-24T08:29:57,923 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,923 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:29:57,924 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740 2024-11-24T08:29:57,924 INFO [RS:0;30c28c82771d:35553 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:29:57,924 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740 2024-11-24T08:29:57,924 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,925 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,926 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:29:57,926 DEBUG 
[RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:29:57,926 DEBUG [RS:0;30c28c82771d:35553 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:29:57,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:29:57,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:29:57,927 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:29:57,928 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,928 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,928 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,928 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:29:57,929 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,929 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35553,1732436997251-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-24T08:29:57,933 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:29:57,933 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795396, jitterRate=0.011398673057556152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:29:57,934 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732436997911Initializing all the Stores at 1732436997912 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436997912Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436997912Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436997912Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436997912Cleaning up temporary data from old regions at 1732436997926 (+14 ms)Region opened successfully at 1732436997934 (+8 ms) 2024-11-24T08:29:57,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:29:57,935 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:29:57,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:29:57,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:29:57,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:29:57,935 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:29:57,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732436997934Disabling compacts and flushes for region at 1732436997934Disabling writes for close at 1732436997935 (+1 ms)Writing region close event to WAL at 1732436997935Closed at 1732436997935 2024-11-24T08:29:57,937 DEBUG [PEWorker-1 
{}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:29:57,937 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:29:57,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:29:57,939 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:29:57,940 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:29:57,957 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:29:57,957 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35553,1732436997251-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,958 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:57,958 INFO [RS:0;30c28c82771d:35553 {}] regionserver.Replication(171): 30c28c82771d,35553,1732436997251 started 2024-11-24T08:29:57,978 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:29:57,978 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,35553,1732436997251, RpcServer on 30c28c82771d/172.17.0.2:35553, sessionid=0x10149189ec80001 2024-11-24T08:29:57,978 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:29:57,978 DEBUG [RS:0;30c28c82771d:35553 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,35553,1732436997251 2024-11-24T08:29:57,978 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,35553,1732436997251' 2024-11-24T08:29:57,978 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:29:57,979 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:29:57,979 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:29:57,979 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:29:57,979 DEBUG [RS:0;30c28c82771d:35553 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,35553,1732436997251 2024-11-24T08:29:57,979 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,35553,1732436997251' 2024-11-24T08:29:57,979 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:29:57,980 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:29:57,980 DEBUG [RS:0;30c28c82771d:35553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:29:57,980 INFO [RS:0;30c28c82771d:35553 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:29:57,980 INFO [RS:0;30c28c82771d:35553 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:29:58,083 INFO [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C35553%2C1732436997251, suffix=, logDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/WALs/30c28c82771d,35553,1732436997251, archiveDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/oldWALs, maxLogs=32 2024-11-24T08:29:58,086 INFO [RS:0;30c28c82771d:35553 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C35553%2C1732436997251.1732436998085 2024-11-24T08:29:58,091 WARN [30c28c82771d:40421 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T08:29:58,093 INFO [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/WALs/30c28c82771d,35553,1732436997251/30c28c82771d%2C35553%2C1732436997251.1732436998085 2024-11-24T08:29:58,094 DEBUG [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38251:38251),(127.0.0.1/127.0.0.1:41465:41465)] 2024-11-24T08:29:58,341 DEBUG [30c28c82771d:40421 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:29:58,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,35553,1732436997251 2024-11-24T08:29:58,344 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,35553,1732436997251, state=OPENING 2024-11-24T08:29:58,346 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:29:58,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:58,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:58,349 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:29:58,349 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:29:58,349 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,35553,1732436997251}] 2024-11-24T08:29:58,349 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:29:58,504 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:29:58,506 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58611, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:29:58,511 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:29:58,512 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:29:58,514 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C35553%2C1732436997251.meta, suffix=.meta, 
logDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/WALs/30c28c82771d,35553,1732436997251, archiveDir=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/oldWALs, maxLogs=32 2024-11-24T08:29:58,516 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C35553%2C1732436997251.meta.1732436998516.meta 2024-11-24T08:29:58,522 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/WALs/30c28c82771d,35553,1732436997251/30c28c82771d%2C35553%2C1732436997251.meta.1732436998516.meta 2024-11-24T08:29:58,523 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41465:41465),(127.0.0.1/127.0.0.1:38251:38251)] 2024-11-24T08:29:58,524 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:29:58,525 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:29:58,525 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:29:58,525 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T08:29:58,525 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:29:58,525 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:29:58,525 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:29:58,525 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:29:58,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:29:58,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:29:58,528 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:58,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:58,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:29:58,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:29:58,530 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:58,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:58,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:29:58,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:29:58,532 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:58,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:29:58,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:29:58,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:29:58,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:29:58,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:29:58,535 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:29:58,536 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740 2024-11-24T08:29:58,538 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740 2024-11-24T08:29:58,540 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:29:58,540 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:29:58,540 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:29:58,542 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:29:58,543 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857403, jitterRate=0.09024551510810852}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:29:58,543 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:29:58,545 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732436998525Writing region info on filesystem at 1732436998525Initializing all the Stores at 1732436998527 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436998527Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436998527Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732436998527Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732436998527Cleaning up temporary data from old regions at 1732436998540 (+13 ms)Running coprocessor post-open hooks at 1732436998544 (+4 ms)Region opened successfully at 1732436998544 2024-11-24T08:29:58,546 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732436998503 2024-11-24T08:29:58,549 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:29:58,550 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:29:58,551 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,35553,1732436997251 2024-11-24T08:29:58,552 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,35553,1732436997251, state=OPEN 2024-11-24T08:29:58,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:29:58,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:29:58,557 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,35553,1732436997251 2024-11-24T08:29:58,557 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:29:58,557 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:29:58,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:29:58,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,35553,1732436997251 in 208 msec 2024-11-24T08:29:58,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:29:58,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 627 msec 2024-11-24T08:29:58,568 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:29:58,568 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:29:58,570 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:29:58,570 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,35553,1732436997251, seqNum=-1] 2024-11-24T08:29:58,571 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:29:58,572 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46099, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:29:58,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 701 msec 2024-11-24T08:29:58,579 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732436998579, completionTime=-1 2024-11-24T08:29:58,579 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:29:58,580 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:29:58,582 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:29:58,582 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732437058582 2024-11-24T08:29:58,582 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437118582 2024-11-24T08:29:58,582 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T08:29:58,583 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40421,1732436997194-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:58,583 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40421,1732436997194-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:58,583 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40421,1732436997194-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:58,583 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:40421, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:29:58,583 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:58,583 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:29:58,585 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.286sec 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40421,1732436997194-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:29:58,588 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40421,1732436997194-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:29:58,591 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:29:58,591 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:29:58,591 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40421,1732436997194-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:29:58,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:29:58,680 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,40421,-1 for getting cluster id 2024-11-24T08:29:58,680 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:29:58,683 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b584536f-58c4-430b-9063-97a32737fd0a' 2024-11-24T08:29:58,683 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:29:58,683 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b584536f-58c4-430b-9063-97a32737fd0a" 2024-11-24T08:29:58,684 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39f18556, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:29:58,684 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,40421,-1] 2024-11-24T08:29:58,684 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:29:58,685 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:58,687 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:29:58,688 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27cf692f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:29:58,688 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:29:58,689 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,35553,1732436997251, seqNum=-1] 2024-11-24T08:29:58,690 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:29:58,691 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38480, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:29:58,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30c28c82771d,40421,1732436997194 2024-11-24T08:29:58,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:29:58,697 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:29:58,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:29:58,698 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:29:58,698 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:29:58,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:58,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:58,698 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:29:58,698 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:29:58,698 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=325101689, stopped=false 2024-11-24T08:29:58,698 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,40421,1732436997194 2024-11-24T08:29:58,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:29:58,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:29:58,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:58,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:58,700 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:29:58,700 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:29:58,701 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:29:58,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:58,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:29:58,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:29:58,701 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,35553,1732436997251' ***** 2024-11-24T08:29:58,701 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:29:58,701 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:29:58,701 INFO [RS:0;30c28c82771d:35553 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:29:58,701 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,35553,1732436997251 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:35553. 2024-11-24T08:29:58,702 DEBUG [RS:0;30c28c82771d:35553 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:29:58,702 DEBUG [RS:0;30c28c82771d:35553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:29:58,702 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:29:58,703 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T08:29:58,703 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:29:58,703 DEBUG [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:29:58,703 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:29:58,703 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:29:58,703 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:29:58,703 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:29:58,703 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:29:58,703 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T08:29:58,723 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/.tmp/ns/1167842b1d71419f95d970aeecbbc962 is 43, key is default/ns:d/1732436998573/Put/seqid=0 2024-11-24T08:29:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741835_1011 (size=5153) 2024-11-24T08:29:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741835_1011 (size=5153) 2024-11-24T08:29:58,730 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/.tmp/ns/1167842b1d71419f95d970aeecbbc962 2024-11-24T08:29:58,740 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/.tmp/ns/1167842b1d71419f95d970aeecbbc962 as hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/ns/1167842b1d71419f95d970aeecbbc962 2024-11-24T08:29:58,749 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/ns/1167842b1d71419f95d970aeecbbc962, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T08:29:58,751 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 48ms, sequenceid=6, compaction requested=false 2024-11-24T08:29:58,751 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:29:58,758 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T08:29:58,759 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:29:58,759 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:29:58,759 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732436998703Running coprocessor pre-close hooks at 1732436998703Disabling compacts and flushes for region at 1732436998703Disabling writes for close at 1732436998703Obtaining lock to block concurrent updates at 1732436998703Preparing flush snapshotting stores in 1588230740 at 1732436998703Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732436998703Flushing stores of hbase:meta,,1.1588230740 at 1732436998704 (+1 ms)Flushing 1588230740/ns: creating writer at 1732436998704Flushing 1588230740/ns: appending metadata at 1732436998722 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732436998722Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15935805: reopening flushed file at 1732436998738 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 48ms, sequenceid=6, compaction requested=false at 1732436998751 (+13 ms)Writing region close event to WAL at 1732436998753 (+2 ms)Running coprocessor post-close hooks at 1732436998759 (+6 ms)Closed at 1732436998759 2024-11-24T08:29:58,759 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:29:58,903 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,35553,1732436997251; all regions closed. 
2024-11-24T08:29:58,904 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,904 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,904 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,904 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,904 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:29:58,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:29:58,910 DEBUG [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/oldWALs 2024-11-24T08:29:58,910 INFO [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C35553%2C1732436997251.meta:.meta(num 1732436998516) 2024-11-24T08:29:58,910 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,911 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,911 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,911 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,911 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:58,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:29:58,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:29:58,916 DEBUG [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/oldWALs 2024-11-24T08:29:58,916 INFO [RS:0;30c28c82771d:35553 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C35553%2C1732436997251:(num 1732436998085) 2024-11-24T08:29:58,916 DEBUG [RS:0;30c28c82771d:35553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:29:58,916 INFO [RS:0;30c28c82771d:35553 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:29:58,916 INFO [RS:0;30c28c82771d:35553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:29:58,916 INFO [RS:0;30c28c82771d:35553 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T08:29:58,916 INFO [RS:0;30c28c82771d:35553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:29:58,917 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:29:58,917 INFO [RS:0;30c28c82771d:35553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35553 2024-11-24T08:29:58,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,35553,1732436997251 2024-11-24T08:29:58,919 INFO [RS:0;30c28c82771d:35553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:29:58,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:29:58,921 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,35553,1732436997251] 2024-11-24T08:29:58,923 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,35553,1732436997251 already deleted, retry=false 2024-11-24T08:29:58,923 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,35553,1732436997251 expired; onlineServers=0 2024-11-24T08:29:58,923 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,40421,1732436997194' ***** 2024-11-24T08:29:58,923 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:29:58,923 INFO [M:0;30c28c82771d:40421 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:29:58,923 INFO [M:0;30c28c82771d:40421 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:29:58,924 DEBUG [M:0;30c28c82771d:40421 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:29:58,924 DEBUG [M:0;30c28c82771d:40421 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:29:58,924 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:29:58,924 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732436997884 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732436997884,5,FailOnTimeoutGroup] 2024-11-24T08:29:58,924 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732436997884 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732436997884,5,FailOnTimeoutGroup] 2024-11-24T08:29:58,924 INFO [M:0;30c28c82771d:40421 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:29:58,924 INFO [M:0;30c28c82771d:40421 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:29:58,924 DEBUG [M:0;30c28c82771d:40421 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:29:58,924 INFO [M:0;30c28c82771d:40421 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:29:58,924 INFO [M:0;30c28c82771d:40421 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:29:58,925 INFO [M:0;30c28c82771d:40421 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:29:58,925 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:29:58,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:29:58,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:29:58,926 DEBUG [M:0;30c28c82771d:40421 {}] zookeeper.ZKUtil(347): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:29:58,926 WARN [M:0;30c28c82771d:40421 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:29:58,926 INFO [M:0;30c28c82771d:40421 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/.lastflushedseqids 2024-11-24T08:29:58,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741836_1012 (size=99) 2024-11-24T08:29:58,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741836_1012 (size=99) 2024-11-24T08:29:58,934 INFO [M:0;30c28c82771d:40421 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:29:58,934 INFO [M:0;30c28c82771d:40421 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:29:58,935 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:29:58,935 INFO [M:0;30c28c82771d:40421 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:58,935 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:58,935 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:29:58,935 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:29:58,935 INFO [M:0;30c28c82771d:40421 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T08:29:58,955 DEBUG [M:0;30c28c82771d:40421 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4eda3057c1744ee88054e44c243176c1 is 82, key is hbase:meta,,1/info:regioninfo/1732436998550/Put/seqid=0 2024-11-24T08:29:58,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741837_1013 (size=5672) 2024-11-24T08:29:58,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741837_1013 (size=5672) 2024-11-24T08:29:58,963 INFO [M:0;30c28c82771d:40421 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4eda3057c1744ee88054e44c243176c1 2024-11-24T08:29:58,988 DEBUG [M:0;30c28c82771d:40421 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/177ed23a2bec40c98c51140ba3e7acec is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732436998578/Put/seqid=0 2024-11-24T08:29:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741838_1014 (size=5275) 2024-11-24T08:29:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741838_1014 (size=5275) 2024-11-24T08:29:58,995 INFO [M:0;30c28c82771d:40421 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/177ed23a2bec40c98c51140ba3e7acec 2024-11-24T08:29:59,022 INFO [RS:0;30c28c82771d:35553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:29:59,022 INFO [RS:0;30c28c82771d:35553 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,35553,1732436997251; zookeeper connection closed. 
2024-11-24T08:29:59,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:59,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35553-0x10149189ec80001, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:59,022 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67d06fdf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67d06fdf 2024-11-24T08:29:59,022 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:29:59,027 DEBUG [M:0;30c28c82771d:40421 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/164cb33bf9df420a8e6fcc6e5ddf0e06 is 69, key is 30c28c82771d,35553,1732436997251/rs:state/1732436997909/Put/seqid=0 2024-11-24T08:29:59,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741839_1015 (size=5156) 2024-11-24T08:29:59,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741839_1015 (size=5156) 2024-11-24T08:29:59,035 INFO [M:0;30c28c82771d:40421 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/164cb33bf9df420a8e6fcc6e5ddf0e06 2024-11-24T08:29:59,059 DEBUG [M:0;30c28c82771d:40421 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30b939547b364f9f93ac4c9bbb392416 is 52, key is load_balancer_on/state:d/1732436998696/Put/seqid=0 2024-11-24T08:29:59,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741840_1016 (size=5056) 2024-11-24T08:29:59,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741840_1016 (size=5056) 2024-11-24T08:29:59,066 INFO [M:0;30c28c82771d:40421 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30b939547b364f9f93ac4c9bbb392416 2024-11-24T08:29:59,073 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4eda3057c1744ee88054e44c243176c1 as hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4eda3057c1744ee88054e44c243176c1 2024-11-24T08:29:59,079 INFO [M:0;30c28c82771d:40421 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4eda3057c1744ee88054e44c243176c1, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T08:29:59,080 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/177ed23a2bec40c98c51140ba3e7acec as hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/177ed23a2bec40c98c51140ba3e7acec 2024-11-24T08:29:59,086 INFO [M:0;30c28c82771d:40421 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/177ed23a2bec40c98c51140ba3e7acec, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T08:29:59,087 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/164cb33bf9df420a8e6fcc6e5ddf0e06 as hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/164cb33bf9df420a8e6fcc6e5ddf0e06 2024-11-24T08:29:59,094 INFO [M:0;30c28c82771d:40421 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/164cb33bf9df420a8e6fcc6e5ddf0e06, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T08:29:59,095 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30b939547b364f9f93ac4c9bbb392416 as hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/30b939547b364f9f93ac4c9bbb392416 2024-11-24T08:29:59,101 INFO [M:0;30c28c82771d:40421 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40285/user/jenkins/test-data/9afbd1e0-b0b2-9c0d-c534-acaafcd49a7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/30b939547b364f9f93ac4c9bbb392416, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T08:29:59,102 INFO [M:0;30c28c82771d:40421 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=29, compaction requested=false 2024-11-24T08:29:59,104 INFO [M:0;30c28c82771d:40421 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:29:59,104 DEBUG [M:0;30c28c82771d:40421 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732436998935Disabling compacts and flushes for region at 1732436998935Disabling writes for close at 1732436998935Obtaining lock to block concurrent updates at 1732436998935Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732436998935Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732436998935Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732436998936 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732436998936Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732436998955 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732436998955Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732436998970 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732436998988 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732436998988Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732436999002 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732436999026 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732436999026Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732436999042 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732436999059 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732436999059Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7846cdd8: reopening flushed file at 1732436999072 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35b3599d: reopening flushed file at 1732436999079 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ddeb415: reopening flushed file at 1732436999086 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13a21ac2: reopening flushed file at 1732436999094 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=29, compaction requested=false at 1732436999102 (+8 ms)Writing region close event to WAL at 1732436999104 (+2 ms)Closed at 1732436999104 2024-11-24T08:29:59,104 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:59,105 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:59,105 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:59,105 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:59,105 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:29:59,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43937 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:29:59,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45857 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:29:59,109 INFO [M:0;30c28c82771d:40421 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T08:29:59,109 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:29:59,109 INFO [M:0;30c28c82771d:40421 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40421 2024-11-24T08:29:59,109 INFO [M:0;30c28c82771d:40421 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:29:59,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:59,211 INFO [M:0;30c28c82771d:40421 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:29:59,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40421-0x10149189ec80000, quorum=127.0.0.1:51977, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:29:59,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38d766c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:59,215 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12686d25{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:29:59,215 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:29:59,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ab06e68{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:29:59,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c30f553{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir/,STOPPED} 2024-11-24T08:29:59,216 WARN [BP-516107162-172.17.0.2-1732436996280 heartbeating to localhost/127.0.0.1:40285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:29:59,216 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:29:59,216 WARN [BP-516107162-172.17.0.2-1732436996280 heartbeating to localhost/127.0.0.1:40285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-516107162-172.17.0.2-1732436996280 (Datanode Uuid 6d690175-a24f-4343-849b-c4eeecb9811f) service to localhost/127.0.0.1:40285 2024-11-24T08:29:59,216 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:29:59,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data3/current/BP-516107162-172.17.0.2-1732436996280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:59,218 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data4/current/BP-516107162-172.17.0.2-1732436996280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:59,218 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:29:59,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78fa6004{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:59,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b5fac92{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:29:59,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:29:59,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4620cd8a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:29:59,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3270c9ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir/,STOPPED} 2024-11-24T08:29:59,223 WARN [BP-516107162-172.17.0.2-1732436996280 heartbeating to localhost/127.0.0.1:40285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:29:59,223 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:29:59,223 WARN [BP-516107162-172.17.0.2-1732436996280 heartbeating to localhost/127.0.0.1:40285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-516107162-172.17.0.2-1732436996280 (Datanode Uuid 39f0a1cb-008a-4808-8a4c-950ae8a09812) service to localhost/127.0.0.1:40285 2024-11-24T08:29:59,223 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:29:59,224 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data1/current/BP-516107162-172.17.0.2-1732436996280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:59,224 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/cluster_e7b0f16b-cf89-2f41-5dfd-971534ff60e5/data/data2/current/BP-516107162-172.17.0.2-1732436996280 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:29:59,224 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:29:59,231 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bc50e3b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:29:59,231 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8c19991{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:29:59,231 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:29:59,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d9de743{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:29:59,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@630e1a46{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir/,STOPPED} 2024-11-24T08:29:59,238 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:29:59,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:29:59,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:29:59,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.log.dir so I do NOT create it in target/test-data/9b17f034-a060-4041-392a-93fbc33540b3 2024-11-24T08:29:59,255 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96c236ac-0546-9314-1546-bb137ffbcdfd/hadoop.tmp.dir so I do NOT create it in target/test-data/9b17f034-a060-4041-392a-93fbc33540b3 2024-11-24T08:29:59,255 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b, deleteOnExit=true 2024-11-24T08:29:59,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:29:59,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/test.cache.data in system properties and HBase conf 2024-11-24T08:29:59,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:29:59,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:29:59,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:29:59,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:29:59,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:29:59,256 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T08:29:59,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:29:59,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:29:59,272 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:29:59,347 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:59,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:29:59,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:29:59,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:29:59,356 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:29:59,357 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:59,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d8a9c69{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:29:59,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c6abea1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:29:59,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6469263a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-40747-hadoop-hdfs-3_4_1-tests_jar-_-any-8273375220944655844/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:29:59,476 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2324be38{HTTP/1.1, (http/1.1)}{localhost:40747} 2024-11-24T08:29:59,476 INFO [Time-limited test {}] server.Server(415): Started @105599ms 2024-11-24T08:29:59,490 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:29:59,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:29:59,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:29:59,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:29:59,524 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T08:29:59,558 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:59,562 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:29:59,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:29:59,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:29:59,562 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:29:59,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@259c861e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:29:59,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2df55a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:29:59,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:29:59,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:29:59,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d790455{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-43697-hadoop-hdfs-3_4_1-tests_jar-_-any-14823974858577240665/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:59,693 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@795c5052{HTTP/1.1, (http/1.1)}{localhost:43697} 2024-11-24T08:29:59,693 INFO [Time-limited test {}] server.Server(415): Started @105816ms 2024-11-24T08:29:59,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:29:59,727 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:29:59,729 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:29:59,746 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:29:59,748 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:29:59,749 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:29:59,765 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:29:59,770 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:29:59,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:29:59,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:29:59,772 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:29:59,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c80aceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:29:59,773 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e20426d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:29:59,808 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data1/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:29:59,808 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data2/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:29:59,833 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:29:59,836 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x647605f1becfe9ea with lease ID 0x9f07083e61bc9294: Processing first storage report for DS-06b9a65f-3a28-4448-a53c-d825f9800a62 from datanode DatanodeRegistration(127.0.0.1:34653, datanodeUuid=6cff4492-6289-4a01-bb4b-e9d857fcf40b, infoPort=36935, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:29:59,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x647605f1becfe9ea with lease ID 0x9f07083e61bc9294: from storage DS-06b9a65f-3a28-4448-a53c-d825f9800a62 node DatanodeRegistration(127.0.0.1:34653, datanodeUuid=6cff4492-6289-4a01-bb4b-e9d857fcf40b, infoPort=36935, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:29:59,836 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x647605f1becfe9ea with lease ID 0x9f07083e61bc9294: Processing first storage report for DS-f25ef476-d825-42f1-9047-7b349231138c from datanode DatanodeRegistration(127.0.0.1:34653, datanodeUuid=6cff4492-6289-4a01-bb4b-e9d857fcf40b, infoPort=36935, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:29:59,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x647605f1becfe9ea with lease ID 0x9f07083e61bc9294: from storage DS-f25ef476-d825-42f1-9047-7b349231138c node DatanodeRegistration(127.0.0.1:34653, datanodeUuid=6cff4492-6289-4a01-bb4b-e9d857fcf40b, infoPort=36935, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:29:59,896 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7efd52f6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-42261-hadoop-hdfs-3_4_1-tests_jar-_-any-7184024642530582419/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:29:59,896 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4577488a{HTTP/1.1, (http/1.1)}{localhost:42261} 2024-11-24T08:29:59,896 INFO [Time-limited test {}] server.Server(415): Started @106019ms 2024-11-24T08:29:59,898 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
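
[Editor's note, illustrative sketch] The lines above record HBaseTestingUtil tearing down one minicluster and bringing up another with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}: DFS and its two DataNodes first, then (below) ZooKeeper, the master and a regionserver. A minimal Java sketch of how a test typically drives that sequence follows; it assumes the branch-3 test API named in the log (HBaseTestingUtil, StartMiniClusterOption), and the exact builder method names should be read as an approximation, not a verified listing.

// Sketch only: start and stop a minicluster matching the options logged above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)         // one HMaster, as in the log
        .numRegionServers(1)   // one RegionServer
        .numDataNodes(2)       // two HDFS DataNodes (data1/data2 directories above)
        .numZkServers(1)       // one MiniZooKeeperCluster node
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, master, regionserver
    try {
      // ... test body would run against the minicluster here ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen above
    }
  }
}
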
2024-11-24T08:29:59,932 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:30:00,016 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:00,016 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:00,043 WARN [Thread-673 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:30:00,046 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe16eb9bf98e82fa8 with lease ID 0x9f07083e61bc9295: Processing first storage report for DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69 from datanode DatanodeRegistration(127.0.0.1:44185, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=45471, infoSecurePort=0, ipcPort=45863, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:00,046 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe16eb9bf98e82fa8 with lease ID 0x9f07083e61bc9295: from storage DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69 node DatanodeRegistration(127.0.0.1:44185, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=45471, infoSecurePort=0, ipcPort=45863, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:00,046 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe16eb9bf98e82fa8 with lease ID 0x9f07083e61bc9295: Processing first storage report for DS-b5f0f57c-56ab-4193-b307-d8708a36ce0f from datanode DatanodeRegistration(127.0.0.1:44185, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=45471, infoSecurePort=0, ipcPort=45863, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:00,046 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe16eb9bf98e82fa8 with lease ID 0x9f07083e61bc9295: from storage DS-b5f0f57c-56ab-4193-b307-d8708a36ce0f node DatanodeRegistration(127.0.0.1:44185, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=45471, infoSecurePort=0, ipcPort=45863, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:00,137 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3 2024-11-24T08:30:00,140 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/zookeeper_0, clientPort=65078, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:30:00,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65078 2024-11-24T08:30:00,141 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:30:00,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:30:00,155 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40 with version=8 2024-11-24T08:30:00,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:30:00,159 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:30:00,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:00,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:00,159 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:30:00,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:00,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:30:00,159 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:30:00,163 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:30:00,164 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35659 2024-11-24T08:30:00,166 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35659 connecting to ZooKeeper ensemble=127.0.0.1:65078 2024-11-24T08:30:00,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:356590x0, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:30:00,173 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35659-0x1014918aa5b0000 connected 2024-11-24T08:30:00,195 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,200 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:00,200 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40, hbase.cluster.distributed=false 2024-11-24T08:30:00,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:30:00,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35659 2024-11-24T08:30:00,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35659 2024-11-24T08:30:00,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35659 2024-11-24T08:30:00,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35659 2024-11-24T08:30:00,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35659 2024-11-24T08:30:00,221 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:30:00,221 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:30:00,222 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45841 2024-11-24T08:30:00,223 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45841 connecting to ZooKeeper ensemble=127.0.0.1:65078 2024-11-24T08:30:00,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458410x0, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:30:00,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45841-0x1014918aa5b0001 connected 2024-11-24T08:30:00,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:00,232 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:30:00,233 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:30:00,233 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:30:00,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:30:00,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45841 2024-11-24T08:30:00,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45841 2024-11-24T08:30:00,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45841 2024-11-24T08:30:00,236 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45841 2024-11-24T08:30:00,236 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45841 2024-11-24T08:30:00,249 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:35659 2024-11-24T08:30:00,249 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:00,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:00,251 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:30:00,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,255 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:30:00,255 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,35659,1732437000158 from backup master directory 2024-11-24T08:30:00,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:00,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:00,257 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by 
start scripts (Longer MTTR!) 2024-11-24T08:30:00,257 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,263 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/hbase.id] with ID: 568109f0-24f1-4cc4-b2eb-418fd9c2c8ed 2024-11-24T08:30:00,263 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/.tmp/hbase.id 2024-11-24T08:30:00,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:30:00,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:30:00,271 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/.tmp/hbase.id]:[hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/hbase.id] 2024-11-24T08:30:00,283 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:00,283 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:30:00,285 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
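
[Editor's note, illustrative sketch] The FSUtils lines just above show the new active master writing the cluster ID to a temporary file (.tmp/hbase.id) and then moving it to its final name, so readers never see a half-written file. The sketch below shows that write-then-rename pattern with the stock Hadoop FileSystem API; the paths are stand-ins, and a plain UUID string stands in for the serialized ClusterId that the real hbase.id file contains.

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // illustrative root dir
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // 1. Write the ID to a temporary location first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // 2. ... then rename it into place, so a concurrent reader either sees the
    //    complete file or no file at all.
    if (!fs.rename(tmp, target)) {
      throw new RuntimeException("rename failed: " + tmp + " -> " + target);
    }
  }
}
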
2024-11-24T08:30:00,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:30:00,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:30:00,294 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:30:00,295 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:30:00,295 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:00,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:30:00,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:30:00,304 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store 2024-11-24T08:30:00,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:30:00,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:30:00,312 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:00,312 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:30:00,312 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:00,312 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:00,312 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:30:00,312 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:00,312 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
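
[Editor's note, illustrative sketch] The MasterRegion lines above dump the full descriptor of the internal 'master:store' table: an in-memory 'info' family with 3 versions, 8 KB blocks, ROW_INDEX_V1 encoding and a ROWCOL bloom filter, plus single-version 'proc', 'rs' and 'state' families with defaults. For reference, this is how an equivalent descriptor could be assembled with the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API; only two families are shown, and the table name is a stand-in, since 'master:store' itself is created internally by MasterRegion rather than through this client API.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // 'info' family: 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter -- mirroring the attributes printed in the log.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();

    // 'proc' family: single version, default 64 KB blocks, ROW bloom filter.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();

    // "example:store" is a placeholder namespace:qualifier, not the internal table.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
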
2024-11-24T08:30:00,312 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437000312Disabling compacts and flushes for region at 1732437000312Disabling writes for close at 1732437000312Writing region close event to WAL at 1732437000312Closed at 1732437000312 2024-11-24T08:30:00,313 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/.initializing 2024-11-24T08:30:00,313 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,316 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C35659%2C1732437000158, suffix=, logDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/oldWALs, maxLogs=10 2024-11-24T08:30:00,316 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C35659%2C1732437000158.1732437000316 2024-11-24T08:30:00,322 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 2024-11-24T08:30:00,323 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45471:45471),(127.0.0.1/127.0.0.1:36935:36935)] 2024-11-24T08:30:00,323 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:30:00,323 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:00,323 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,324 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,325 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,326 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:30:00,326 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:00,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:30:00,328 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:00,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,330 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:30:00,330 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:00,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:30:00,332 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:00,333 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,334 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,335 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,336 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,336 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,337 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:30:00,339 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:00,341 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:30:00,342 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859326, jitterRate=0.09269073605537415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:30:00,343 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732437000324Initializing all the Stores at 1732437000325 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437000325Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437000325Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437000325Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437000325Cleaning up temporary data from old regions at 1732437000336 (+11 ms)Region opened successfully at 1732437000343 (+7 ms) 2024-11-24T08:30:00,344 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:30:00,348 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23371dcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:30:00,349 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:30:00,349 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:30:00,349 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:30:00,349 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:30:00,350 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:30:00,350 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:30:00,350 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:30:00,353 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:30:00,354 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:30:00,356 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:30:00,356 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:30:00,357 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:30:00,358 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:30:00,358 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:30:00,359 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:30:00,361 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:30:00,362 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:30:00,363 DEBUG 
[master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:30:00,366 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:30:00,367 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:30:00,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:00,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:00,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,370 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,35659,1732437000158, sessionid=0x1014918aa5b0000, setting cluster-up flag (Was=false) 2024-11-24T08:30:00,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,378 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:30:00,379 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:00,388 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:30:00,389 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,35659,1732437000158 2024-11-24T08:30:00,390 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:30:00,393 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:00,393 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:30:00,393 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:30:00,393 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,35659,1732437000158 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:30:00,395 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732437030397 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:30:00,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,397 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:00,398 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:30:00,398 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:30:00,398 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:30:00,398 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:30:00,398 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:30:00,398 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:30:00,399 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437000398,5,FailOnTimeoutGroup] 2024-11-24T08:30:00,399 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437000399,5,FailOnTimeoutGroup] 2024-11-24T08:30:00,399 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,399 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:00,399 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:30:00,399 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,399 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,399 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:30:00,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:30:00,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:30:00,408 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:30:00,408 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40 2024-11-24T08:30:00,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:30:00,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:30:00,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:00,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:30:00,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:30:00,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:00,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-24T08:30:00,424 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:30:00,424 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:00,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:30:00,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:30:00,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:00,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:30:00,429 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:30:00,429 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:00,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:00,429 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:30:00,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740 2024-11-24T08:30:00,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740 2024-11-24T08:30:00,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:30:00,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:30:00,433 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T08:30:00,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:30:00,437 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:30:00,438 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748344, jitterRate=-0.048432230949401855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:30:00,438 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(746): ClusterId : 568109f0-24f1-4cc4-b2eb-418fd9c2c8ed 2024-11-24T08:30:00,438 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:30:00,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732437000419Initializing all the Stores at 1732437000420 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437000420Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437000420Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437000420Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437000420Cleaning up temporary data from old regions at 1732437000433 (+13 ms)Region opened successfully at 1732437000439 (+6 ms) 2024-11-24T08:30:00,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:30:00,439 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:30:00,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:30:00,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:30:00,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:30:00,440 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-24T08:30:00,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437000439Disabling compacts and flushes for region at 1732437000439Disabling writes for close at 1732437000440 (+1 ms)Writing region close event to WAL at 1732437000440Closed at 1732437000440 2024-11-24T08:30:00,441 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:30:00,441 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:30:00,442 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:00,442 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:30:00,442 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:30:00,443 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:30:00,444 DEBUG [RS:0;30c28c82771d:45841 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753efdb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:30:00,444 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:30:00,446 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:30:00,458 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:45841 2024-11-24T08:30:00,458 INFO [RS:0;30c28c82771d:45841 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:30:00,458 INFO [RS:0;30c28c82771d:45841 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:30:00,458 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:30:00,459 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,35659,1732437000158 with port=45841, startcode=1732437000220 2024-11-24T08:30:00,459 DEBUG [RS:0;30c28c82771d:45841 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:30:00,461 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47773, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:30:00,462 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35659 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,462 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35659 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,464 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40 2024-11-24T08:30:00,464 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45383 2024-11-24T08:30:00,464 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:30:00,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:30:00,468 DEBUG [RS:0;30c28c82771d:45841 {}] zookeeper.ZKUtil(111): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,468 WARN [RS:0;30c28c82771d:45841 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:30:00,468 INFO [RS:0;30c28c82771d:45841 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:00,468 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,468 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,45841,1732437000220] 2024-11-24T08:30:00,472 INFO [RS:0;30c28c82771d:45841 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:30:00,475 INFO [RS:0;30c28c82771d:45841 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:30:00,476 INFO [RS:0;30c28c82771d:45841 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:30:00,476 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:00,476 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:30:00,477 INFO [RS:0;30c28c82771d:45841 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:30:00,477 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,477 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,477 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:30:00,478 DEBUG [RS:0;30c28c82771d:45841 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:30:00,479 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:00,479 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,479 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,479 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,479 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,479 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,45841,1732437000220-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:30:00,520 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:30:00,520 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,45841,1732437000220-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,520 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,521 INFO [RS:0;30c28c82771d:45841 {}] regionserver.Replication(171): 30c28c82771d,45841,1732437000220 started 2024-11-24T08:30:00,535 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:00,535 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,45841,1732437000220, RpcServer on 30c28c82771d/172.17.0.2:45841, sessionid=0x1014918aa5b0001 2024-11-24T08:30:00,536 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:30:00,536 DEBUG [RS:0;30c28c82771d:45841 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,536 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,45841,1732437000220' 2024-11-24T08:30:00,536 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:30:00,536 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:30:00,537 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:30:00,537 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:30:00,537 DEBUG [RS:0;30c28c82771d:45841 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,537 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,45841,1732437000220' 2024-11-24T08:30:00,537 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:30:00,538 DEBUG 
[RS:0;30c28c82771d:45841 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:30:00,538 DEBUG [RS:0;30c28c82771d:45841 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:30:00,538 INFO [RS:0;30c28c82771d:45841 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:30:00,538 INFO [RS:0;30c28c82771d:45841 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:30:00,596 WARN [30c28c82771d:35659 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:30:00,641 INFO [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C45841%2C1732437000220, suffix=, logDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs, maxLogs=32 2024-11-24T08:30:00,643 INFO [RS:0;30c28c82771d:45841 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.1732437000642 2024-11-24T08:30:00,652 INFO [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 2024-11-24T08:30:00,655 DEBUG [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45471:45471),(127.0.0.1/127.0.0.1:36935:36935)] 2024-11-24T08:30:00,846 DEBUG [30c28c82771d:35659 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:30:00,847 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,45841,1732437000220 2024-11-24T08:30:00,849 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,45841,1732437000220, state=OPENING 2024-11-24T08:30:01,007 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:30:01,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:01,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:01,040 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:30:01,041 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:01,041 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:01,041 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,45841,1732437000220}] 2024-11-24T08:30:01,195 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:30:01,197 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49403, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:30:01,202 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:30:01,202 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:01,204 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C45841%2C1732437000220.meta, suffix=.meta, logDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs, maxLogs=32 2024-11-24T08:30:01,205 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta 2024-11-24T08:30:01,211 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta 2024-11-24T08:30:01,211 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36935:36935),(127.0.0.1/127.0.0.1:45471:45471)] 2024-11-24T08:30:01,212 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:30:01,213 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:30:01,213 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:30:01,213 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T08:30:01,213 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:30:01,213 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:01,213 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:30:01,213 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:30:01,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:30:01,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:30:01,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:01,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:01,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:30:01,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:30:01,218 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:01,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:01,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:30:01,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:30:01,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:01,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:01,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:30:01,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:30:01,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:01,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:30:01,223 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:30:01,224 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740 2024-11-24T08:30:01,225 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740 2024-11-24T08:30:01,226 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:30:01,226 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:30:01,227 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:30:01,228 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:30:01,229 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755690, jitterRate=-0.03909069299697876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:30:01,229 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:30:01,230 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732437001213Writing region info on filesystem at 1732437001213Initializing all the Stores at 1732437001214 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437001214Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437001215 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437001215Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437001215Cleaning up temporary data from old regions at 1732437001226 (+11 ms)Running coprocessor post-open hooks at 1732437001229 (+3 ms)Region opened successfully at 1732437001229 2024-11-24T08:30:01,231 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732437001194 2024-11-24T08:30:01,234 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:30:01,234 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:30:01,235 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,45841,1732437000220 2024-11-24T08:30:01,236 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,45841,1732437000220, state=OPEN 2024-11-24T08:30:01,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:30:01,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:30:01,241 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,45841,1732437000220 2024-11-24T08:30:01,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:01,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:01,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:30:01,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,45841,1732437000220 in 200 msec 2024-11-24T08:30:01,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:30:01,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 802 msec 2024-11-24T08:30:01,249 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:01,249 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:30:01,251 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:30:01,251 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,45841,1732437000220, seqNum=-1] 2024-11-24T08:30:01,251 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:30:01,253 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59995, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:30:01,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 866 msec 2024-11-24T08:30:01,259 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732437001259, completionTime=-1 2024-11-24T08:30:01,260 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:30:01,260 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:30:01,261 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:30:01,261 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732437061261 2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437121262 2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35659,1732437000158-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35659,1732437000158-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35659,1732437000158-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:35659, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:01,262 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,263 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,264 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.010sec 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:30:01,267 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35659,1732437000158-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:30:01,268 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35659,1732437000158-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:30:01,270 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:30:01,270 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:30:01,270 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,35659,1732437000158-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:01,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f9b72e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:30:01,338 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,35659,-1 for getting cluster id 2024-11-24T08:30:01,339 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:30:01,340 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '568109f0-24f1-4cc4-b2eb-418fd9c2c8ed' 2024-11-24T08:30:01,341 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:30:01,341 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "568109f0-24f1-4cc4-b2eb-418fd9c2c8ed" 2024-11-24T08:30:01,341 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@356369f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:30:01,342 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,35659,-1] 2024-11-24T08:30:01,342 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:30:01,342 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:01,344 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47676, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:30:01,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e308c17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:30:01,345 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:30:01,346 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,45841,1732437000220, seqNum=-1] 2024-11-24T08:30:01,346 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:30:01,348 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46784, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:30:01,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30c28c82771d,35659,1732437000158 2024-11-24T08:30:01,350 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:01,353 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:30:01,369 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:30:01,369 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:01,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:01,370 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:30:01,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:01,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:30:01,370 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:30:01,370 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:30:01,371 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34635 2024-11-24T08:30:01,372 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34635 connecting to ZooKeeper ensemble=127.0.0.1:65078 2024-11-24T08:30:01,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:01,374 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:01,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346350x0, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:30:01,378 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34635-0x1014918aa5b0002 connected 2024-11-24T08:30:01,378 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-24T08:30:01,378 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-24T08:30:01,379 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:30:01,379 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
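The RpcExecutor lines above show how the second region server's call queues and handler pools were sized. A hedged sketch, assuming the stock server-side RPC tuning keys; the concrete values in this run come from the mini-cluster's reduced test configuration rather than from code like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Handler threads backing the default call queues
        // (the log above shows handlerCount=3 for this run).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Call queues relative to handler count; 0 keeps a single shared
        // queue, matching numCallQueues=1 in the default.FPBQ.Fifo line.
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.0f);
        // How handlers are split between reads, writes and scans where a
        // read/write queue executor is in use (the RWQ lines above).
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
    }
}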
2024-11-24T08:30:01,380 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:30:01,381 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:30:01,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34635 2024-11-24T08:30:01,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34635 2024-11-24T08:30:01,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34635 2024-11-24T08:30:01,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34635 2024-11-24T08:30:01,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34635 2024-11-24T08:30:01,384 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(746): ClusterId : 568109f0-24f1-4cc4-b2eb-418fd9c2c8ed 2024-11-24T08:30:01,384 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:30:01,386 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:30:01,386 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:30:01,389 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:30:01,390 DEBUG [RS:1;30c28c82771d:34635 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a535180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:30:01,402 DEBUG [RS:1;30c28c82771d:34635 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;30c28c82771d:34635 2024-11-24T08:30:01,402 INFO [RS:1;30c28c82771d:34635 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:30:01,402 INFO [RS:1;30c28c82771d:34635 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:30:01,402 DEBUG [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:30:01,402 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,35659,1732437000158 with port=34635, startcode=1732437001369 2024-11-24T08:30:01,403 DEBUG [RS:1;30c28c82771d:34635 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:30:01,405 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42349, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:30:01,405 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35659 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,34635,1732437001369 2024-11-24T08:30:01,405 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35659 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,34635,1732437001369 2024-11-24T08:30:01,407 DEBUG [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40 2024-11-24T08:30:01,407 DEBUG [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45383 2024-11-24T08:30:01,407 DEBUG [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:30:01,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:30:01,410 DEBUG [RS:1;30c28c82771d:34635 {}] zookeeper.ZKUtil(111): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,34635,1732437001369 2024-11-24T08:30:01,410 WARN [RS:1;30c28c82771d:34635 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:30:01,410 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,34635,1732437001369] 2024-11-24T08:30:01,410 INFO [RS:1;30c28c82771d:34635 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:01,410 DEBUG [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369 2024-11-24T08:30:01,414 INFO [RS:1;30c28c82771d:34635 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:30:01,415 INFO [RS:1;30c28c82771d:34635 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:30:01,416 INFO [RS:1;30c28c82771d:34635 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:30:01,416 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:01,416 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:30:01,417 INFO [RS:1;30c28c82771d:34635 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:30:01,417 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,417 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,417 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,417 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,417 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:30:01,418 DEBUG [RS:1;30c28c82771d:34635 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:30:01,418 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:01,418 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,418 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,419 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,419 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,419 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34635,1732437001369-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:30:01,434 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:30:01,434 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34635,1732437001369-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,434 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,434 INFO [RS:1;30c28c82771d:34635 {}] regionserver.Replication(171): 30c28c82771d,34635,1732437001369 started 2024-11-24T08:30:01,449 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:01,449 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,34635,1732437001369, RpcServer on 30c28c82771d/172.17.0.2:34635, sessionid=0x1014918aa5b0002 2024-11-24T08:30:01,449 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:30:01,449 DEBUG [RS:1;30c28c82771d:34635 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,34635,1732437001369 2024-11-24T08:30:01,449 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,34635,1732437001369' 2024-11-24T08:30:01,449 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:30:01,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;30c28c82771d:34635,5,FailOnTimeoutGroup] 2024-11-24T08:30:01,450 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-24T08:30:01,450 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:30:01,450 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:30:01,450 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:30:01,450 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:30:01,450 DEBUG [RS:1;30c28c82771d:34635 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
30c28c82771d,34635,1732437001369 2024-11-24T08:30:01,450 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,34635,1732437001369' 2024-11-24T08:30:01,450 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:30:01,451 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:30:01,451 DEBUG [RS:1;30c28c82771d:34635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:30:01,451 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 30c28c82771d,35659,1732437000158 2024-11-24T08:30:01,452 INFO [RS:1;30c28c82771d:34635 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:30:01,452 INFO [RS:1;30c28c82771d:34635 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:30:01,452 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@25cff401 2024-11-24T08:30:01,452 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:30:01,454 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47688, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:30:01,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:30:01,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T08:30:01,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:30:01,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:30:01,458 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:30:01,459 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:01,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-24T08:30:01,460 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:30:01,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:30:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741835_1011 (size=393) 2024-11-24T08:30:01,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741835_1011 (size=393) 2024-11-24T08:30:01,472 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ed518b858264204ec9d728e5a6b90ada, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40 2024-11-24T08:30:01,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741836_1012 (size=76) 2024-11-24T08:30:01,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34653 is added to blk_1073741836_1012 (size=76) 2024-11-24T08:30:01,481 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:01,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing ed518b858264204ec9d728e5a6b90ada, disabling compactions & flushes 2024-11-24T08:30:01,481 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. after waiting 0 ms 2024-11-24T08:30:01,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,481 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,481 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for ed518b858264204ec9d728e5a6b90ada: Waiting for close lock at 1732437001481Disabling compacts and flushes for region at 1732437001481Disabling writes for close at 1732437001481Writing region close event to WAL at 1732437001481Closed at 1732437001481 2024-11-24T08:30:01,483 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:30:01,484 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732437001484"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732437001484"}]},"ts":"1732437001484"} 2024-11-24T08:30:01,487 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
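The create request logged above, together with the two preceding TableDescriptorChecker warnings, corresponds to an ordinary client-side table creation. A minimal, hedged sketch of an equivalent call through the public Admin API follows; this is not the test's own code (the test drives creation through its testing utility), only an illustration of where the flagged MAX_FILESIZE and MEMSTORE_FLUSHSIZE values live:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableDescriptorBuilder table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                // These two values trigger the checker warnings above; they are
                // shown only to illustrate which descriptor attributes are meant.
                .setMaxFileSize(786432L)
                .setMemStoreFlushSize(8192L)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .setBlocksize(64 * 1024)
                    .build());
            admin.createTable(table.build());
        }
    }
}

In normal use both setters would stay at their defaults; this test presumably shrinks them to force frequent flushing and splitting, which is exactly what the checker warns about.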
2024-11-24T08:30:01,489 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:30:01,489 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437001489"}]},"ts":"1732437001489"} 2024-11-24T08:30:01,492 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-24T08:30:01,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ed518b858264204ec9d728e5a6b90ada, ASSIGN}] 2024-11-24T08:30:01,494 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ed518b858264204ec9d728e5a6b90ada, ASSIGN 2024-11-24T08:30:01,495 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ed518b858264204ec9d728e5a6b90ada, ASSIGN; state=OFFLINE, location=30c28c82771d,45841,1732437000220; forceNewPlan=false, retain=false 2024-11-24T08:30:01,554 INFO [RS:1;30c28c82771d:34635 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C34635%2C1732437001369, suffix=, logDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs, maxLogs=32 2024-11-24T08:30:01,556 INFO [RS:1;30c28c82771d:34635 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C34635%2C1732437001369.1732437001555 2024-11-24T08:30:01,563 INFO [RS:1;30c28c82771d:34635 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 2024-11-24T08:30:01,564 DEBUG [RS:1;30c28c82771d:34635 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45471:45471),(127.0.0.1/127.0.0.1:36935:36935)] 2024-11-24T08:30:01,646 INFO [30c28c82771d:35659 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
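The AbstractFSWAL line above prints the effective WAL rolling parameters for the new region server. As a hedged sketch only, using standard property names with values copied from that line rather than from the test's actual setup, the same numbers would ordinarily be expressed like this (roll size is typically block size times the roll multiplier, 256 MB * 0.5 = 128 MB here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; roll size is derived from it via the multiplier.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on retained WAL files before flushes are forced
        // (maxLogs=32 in the line above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        // Time-based rolling, independent of size (default one hour).
        conf.setLong("hbase.regionserver.logroll.period", 3_600_000L);
        System.out.println(conf.getInt("hbase.regionserver.maxlogs", 32));
    }
}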
2024-11-24T08:30:01,646 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ed518b858264204ec9d728e5a6b90ada, regionState=OPENING, regionLocation=30c28c82771d,45841,1732437000220 2024-11-24T08:30:01,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ed518b858264204ec9d728e5a6b90ada, ASSIGN because future has completed 2024-11-24T08:30:01,651 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed518b858264204ec9d728e5a6b90ada, server=30c28c82771d,45841,1732437000220}] 2024-11-24T08:30:01,808 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,808 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ed518b858264204ec9d728e5a6b90ada, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:30:01,809 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,809 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:01,809 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,809 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,811 INFO [StoreOpener-ed518b858264204ec9d728e5a6b90ada-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,812 INFO [StoreOpener-ed518b858264204ec9d728e5a6b90ada-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed518b858264204ec9d728e5a6b90ada columnFamilyName info 2024-11-24T08:30:01,812 DEBUG [StoreOpener-ed518b858264204ec9d728e5a6b90ada-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:01,813 INFO [StoreOpener-ed518b858264204ec9d728e5a6b90ada-1 {}] regionserver.HStore(327): Store=ed518b858264204ec9d728e5a6b90ada/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:01,813 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,813 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,814 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,814 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,814 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,816 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,818 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:30:01,818 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ed518b858264204ec9d728e5a6b90ada; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721712, jitterRate=-0.08229678869247437}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:30:01,818 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:01,819 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ed518b858264204ec9d728e5a6b90ada: Running coprocessor pre-open hook at 1732437001809Writing region info on filesystem at 1732437001809Initializing all the Stores at 1732437001810 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437001810Cleaning up temporary data from old regions at 1732437001814 (+4 ms)Running coprocessor post-open hooks at 1732437001818 (+4 ms)Region opened successfully at 1732437001819 (+1 ms) 2024-11-24T08:30:01,820 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada., pid=6, masterSystemTime=1732437001804 2024-11-24T08:30:01,823 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,823 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:01,824 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ed518b858264204ec9d728e5a6b90ada, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,45841,1732437000220 2024-11-24T08:30:01,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed518b858264204ec9d728e5a6b90ada, server=30c28c82771d,45841,1732437000220 because future has completed 2024-11-24T08:30:01,830 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:30:01,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ed518b858264204ec9d728e5a6b90ada, server=30c28c82771d,45841,1732437000220 in 178 msec 2024-11-24T08:30:01,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:30:01,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ed518b858264204ec9d728e5a6b90ada, ASSIGN in 339 msec 2024-11-24T08:30:01,835 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:30:01,835 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437001835"}]},"ts":"1732437001835"} 2024-11-24T08:30:01,837 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-24T08:30:01,839 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:30:01,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 384 msec 2024-11-24T08:30:06,716 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:30:06,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:06,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:06,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:06,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:06,748 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-24T08:30:09,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:30:09,522 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:30:09,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:30:09,523 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-24T08:30:09,524 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:30:09,524 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:30:09,524 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T08:30:09,524 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T08:30:11,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35659 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:30:11,497 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-24T08:30:11,498 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-24T08:30:11,501 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T08:30:11,501 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:11,514 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:11,518 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:11,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:11,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:11,518 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:30:11,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5e4864{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:11,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fb99827{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:11,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@233be953{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-42321-hadoop-hdfs-3_4_1-tests_jar-_-any-6714586520878903932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:11,634 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71810790{HTTP/1.1, (http/1.1)}{localhost:42321} 2024-11-24T08:30:11,634 INFO [Time-limited test {}] server.Server(415): Started @117757ms 2024-11-24T08:30:11,636 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:30:11,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:11,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:11,672 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:11,672 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:11,672 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:30:11,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25509568{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:11,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41559526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:11,737 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data6/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:11,737 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data5/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:11,762 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:30:11,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3a11f951fc680f7 with lease ID 0x9f07083e61bc9296: Processing first storage report for DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079 from datanode DatanodeRegistration(127.0.0.1:38937, datanodeUuid=34692f10-67bd-4984-9ba4-3e22debc31ad, infoPort=34913, infoSecurePort=0, ipcPort=33775, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:11,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3a11f951fc680f7 with lease ID 0x9f07083e61bc9296: from storage DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079 node DatanodeRegistration(127.0.0.1:38937, datanodeUuid=34692f10-67bd-4984-9ba4-3e22debc31ad, infoPort=34913, infoSecurePort=0, ipcPort=33775, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:11,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3a11f951fc680f7 with lease ID 0x9f07083e61bc9296: Processing first storage report for DS-1b4f82d7-7daa-432f-961d-f8ce48394191 from datanode DatanodeRegistration(127.0.0.1:38937, datanodeUuid=34692f10-67bd-4984-9ba4-3e22debc31ad, infoPort=34913, infoSecurePort=0, ipcPort=33775, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:11,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3a11f951fc680f7 with lease ID 0x9f07083e61bc9296: from storage DS-1b4f82d7-7daa-432f-961d-f8ce48394191 node DatanodeRegistration(127.0.0.1:38937, datanodeUuid=34692f10-67bd-4984-9ba4-3e22debc31ad, infoPort=34913, infoSecurePort=0, ipcPort=33775, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:11,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69de1683{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-42589-hadoop-hdfs-3_4_1-tests_jar-_-any-12980294863555827189/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:11,806 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2801262{HTTP/1.1, (http/1.1)}{localhost:42589} 2024-11-24T08:30:11,807 INFO [Time-limited test {}] server.Server(415): Started @117929ms 2024-11-24T08:30:11,808 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:30:11,851 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:11,856 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:11,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:11,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:11,857 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:30:11,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412902c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:11,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b4117c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:11,930 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data7/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:11,930 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data8/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:11,957 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:30:11,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcdfb42ce13e9ce3a with lease ID 0x9f07083e61bc9297: Processing first storage report for DS-650d37d5-2847-4e3d-978c-d2ef36919183 from datanode DatanodeRegistration(127.0.0.1:33711, datanodeUuid=0c965ac5-e2b4-4c19-98f8-ff81aa8da9d4, infoPort=41397, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcdfb42ce13e9ce3a with lease ID 0x9f07083e61bc9297: from storage DS-650d37d5-2847-4e3d-978c-d2ef36919183 node DatanodeRegistration(127.0.0.1:33711, datanodeUuid=0c965ac5-e2b4-4c19-98f8-ff81aa8da9d4, infoPort=41397, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcdfb42ce13e9ce3a with lease ID 0x9f07083e61bc9297: Processing first storage report for DS-c2f53d46-70fe-44a5-b1d7-919577b41da6 from datanode DatanodeRegistration(127.0.0.1:33711, datanodeUuid=0c965ac5-e2b4-4c19-98f8-ff81aa8da9d4, infoPort=41397, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcdfb42ce13e9ce3a with lease ID 0x9f07083e61bc9297: from storage DS-c2f53d46-70fe-44a5-b1d7-919577b41da6 node DatanodeRegistration(127.0.0.1:33711, datanodeUuid=0c965ac5-e2b4-4c19-98f8-ff81aa8da9d4, infoPort=41397, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:11,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b0035e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-41351-hadoop-hdfs-3_4_1-tests_jar-_-any-3058865666515443879/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:11,986 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66046020{HTTP/1.1, (http/1.1)}{localhost:41351} 2024-11-24T08:30:11,986 INFO [Time-limited test {}] server.Server(415): Started @118109ms 2024-11-24T08:30:11,988 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:30:12,088 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:12,088 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10/current/BP-785019810-172.17.0.2-1732436999291/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:12,104 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:30:12,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca858273160e35de with lease ID 0x9f07083e61bc9298: Processing first storage report for DS-f644a837-8d57-4dba-ae2f-d63ab65f1939 from datanode DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:12,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca858273160e35de with lease ID 0x9f07083e61bc9298: from storage DS-f644a837-8d57-4dba-ae2f-d63ab65f1939 node DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:12,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca858273160e35de with lease ID 0x9f07083e61bc9298: Processing first storage report for DS-c49265e6-c82d-4389-9053-e84b5f8c3efd from datanode DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291) 2024-11-24T08:30:12,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca858273160e35de with lease ID 0x9f07083e61bc9298: from storage DS-c49265e6-c82d-4389-9053-e84b5f8c3efd node DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:12,108 WARN [ResponseProcessor for block BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,108 WARN [ResponseProcessor for block BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,108 WARN [ResponseProcessor for block BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,108 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 block BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:12,108 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 block BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:12,108 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 block BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 
2024-11-24T08:30:12,109 WARN [ResponseProcessor for block BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,109 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta block BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:12,109 WARN [PacketResponder: BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44185] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:42290 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42290 dst: /127.0.0.1:44185 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1943146851_22 at /127.0.0.1:42896 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42896 dst: /127.0.0.1:44185 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:59864 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34653:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59864 dst: /127.0.0.1:34653 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1943146851_22 at /127.0.0.1:59896 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34653:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59896 dst: /127.0.0.1:34653 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:42856 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42856 dst: /127.0.0.1:44185 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:59684 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34653:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59684 dst: /127.0.0.1:34653 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:59852 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34653:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59852 dst: /127.0.0.1:34653 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:12,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:42868 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42868 dst: /127.0.0.1:44185 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:12,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7efd52f6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:12,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4577488a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:12,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:12,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e20426d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:12,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c80aceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:12,115 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:12,115 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-785019810-172.17.0.2-1732436999291 (Datanode Uuid 5ec8d436-8ea8-4223-9061-288fdef1d878) service to localhost/127.0.0.1:45383 2024-11-24T08:30:12,115 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:30:12,115 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:12,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:12,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:12,116 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:12,116 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 block BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,121 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 block BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,121 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta block BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,122 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@46e8e9ad {}] datanode.DataXceiver(331): 127.0.0.1:34653:DataXceiver error processing unknown operation src: /127.0.0.1:56844 dst: /127.0.0.1:34653 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:12,123 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 block BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:12,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d790455{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:12,123 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@795c5052{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:12,124 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:12,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2df55a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:12,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@259c861e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:12,125 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:12,125 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-785019810-172.17.0.2-1732436999291 (Datanode Uuid 6cff4492-6289-4a01-bb4b-e9d857fcf40b) service to localhost/127.0.0.1:45383 2024-11-24T08:30:12,125 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:30:12,125 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:12,126 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data1/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:12,126 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data2/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:12,126 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:12,130 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada., hostname=30c28c82771d,45841,1732437000220, seqNum=2] 2024-11-24T08:30:12,132 ERROR [FSHLog-0-hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40-prefix:30c28c82771d,45841,1732437000220 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,132 WARN [FSHLog-0-hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40-prefix:30c28c82771d,45841,1732437000220 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:12,132 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,132 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C45841%2C1732437000220:(num 1732437000642) roll requested 2024-11-24T08:30:12,133 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.1732437012133 2024-11-24T08:30:12,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:12,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:12,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:12,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:12,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:12,139 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437012133 2024-11-24T08:30:12,139 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,140 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41397:41397),(127.0.0.1/127.0.0.1:34913:34913)] 2024-11-24T08:30:12,140 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:12,140 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it next time 2024-11-24T08:30:12,141 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T08:30:12,141 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T08:30:12,141 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 2024-11-24T08:30:12,144 WARN [IPC Server handler 1 on default port 45383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-24T08:30:12,147 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 after 5ms 2024-11-24T08:30:12,296 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:13,419 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:14,140 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:14,142 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437012133 2024-11-24T08:30:14,142 WARN [ResponseProcessor for block BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:14,142 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437012133 block BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:14,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:40126 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40126 dst: /127.0.0.1:33711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:14,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:33010 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33010 dst: /127.0.0.1:38937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:14,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69de1683{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:14,145 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2801262{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:14,145 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:14,145 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41559526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:14,145 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25509568{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:14,146 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:14,146 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:30:14,146 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-785019810-172.17.0.2-1732436999291 (Datanode Uuid 0c965ac5-e2b4-4c19-98f8-ff81aa8da9d4) service to localhost/127.0.0.1:45383 2024-11-24T08:30:14,146 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:14,147 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data7/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:14,147 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data8/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:14,147 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:14,297 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:15,419 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:16,141 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:16,141 WARN [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]] 2024-11-24T08:30:16,142 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C45841%2C1732437000220:(num 1732437012133) roll requested 2024-11-24T08:30:16,142 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.1732437016142 2024-11-24T08:30:16,145 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:16,146 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:16,146 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741839_1021 2024-11-24T08:30:16,148 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 after 4007ms 2024-11-24T08:30:16,148 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:16,152 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:16,152 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 
2024-11-24T08:30:16,152 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741840_1022 2024-11-24T08:30:16,153 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:16,153 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:30:16,157 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:16,158 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:16,158 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:16,158 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:16,158 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:16,158 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437012133 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437016142 2024-11-24T08:30:16,159 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34913:34913),(127.0.0.1/127.0.0.1:40801:40801)] 2024-11-24T08:30:16,159 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it next time 2024-11-24T08:30:16,159 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437012133 is not closed yet, will try archiving it next time 2024-11-24T08:30:16,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38937 is added to blk_1073741838_1020 (size=3600) 2024-11-24T08:30:16,297 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:16,561 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it next time 2024-11-24T08:30:17,420 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:17,781 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5df0d8bb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38937, datanodeUuid=34692f10-67bd-4984-9ba4-3e22debc31ad, infoPort=34913, infoSecurePort=0, ipcPort=33775, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741838_1020 to 127.0.0.1:33711 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:18,157 WARN [ResponseProcessor for block BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1023 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:18,157 WARN [DataStreamer for file /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437016142 block BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:18,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:33032 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:38937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33032 dst: /127.0.0.1:38937 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:18,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37874 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37874 dst: /127.0.0.1:33917 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:18,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@233be953{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:18,159 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,160 WARN [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]] 2024-11-24T08:30:18,160 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C45841%2C1732437000220:(num 1732437016142) roll requested 2024-11-24T08:30:18,160 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71810790{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:18,160 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:18,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fb99827{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:18,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5e4864{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:18,160 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.1732437018160 2024-11-24T08:30:18,162 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:18,162 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:30:18,162 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-785019810-172.17.0.2-1732436999291 (Datanode Uuid 34692f10-67bd-4984-9ba4-3e22debc31ad) service to localhost/127.0.0.1:45383 2024-11-24T08:30:18,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:18,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data5/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:18,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data6/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:18,163 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:18,163 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,164 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:18,164 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741842_1025 2024-11-24T08:30:18,164 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:18,166 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,166 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:18,166 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741843_1026 2024-11-24T08:30:18,166 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:18,168 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,168 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:18,168 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741844_1027 2024-11-24T08:30:18,168 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:18,169 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,170 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 
2024-11-24T08:30:18,170 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741845_1028 2024-11-24T08:30:18,170 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:18,171 WARN [IPC Server handler 2 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:18,171 WARN [IPC Server handler 2 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:18,172 WARN [IPC Server handler 2 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:18,174 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:18,175 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:18,175 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:18,175 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:18,175 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:18,176 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437016142 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437018160 2024-11-24T08:30:18,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741841_1024 (size=3600) 2024-11-24T08:30:18,179 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it next time 2024-11-24T08:30:18,189 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40801:40801)] 2024-11-24T08:30:18,189 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it 
next time 2024-11-24T08:30:18,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45841 {}] regionserver.HRegion(8855): Flush requested on ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:18,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ed518b858264204ec9d728e5a6b90ada 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:30:18,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/164b79a885bb43eda3147462612e2295 is 1080, key is row0002/info:/1732437014149/Put/seqid=0 2024-11-24T08:30:18,215 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,216 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:18,216 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741847_1030 2024-11-24T08:30:18,216 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:18,218 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:18,218 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:18,218 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741848_1031 2024-11-24T08:30:18,219 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:18,220 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,220 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:18,220 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741849_1032 2024-11-24T08:30:18,221 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:18,222 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,222 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:18,222 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741850_1033 2024-11-24T08:30:18,223 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:18,223 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:18,223 WARN [IPC Server handler 1 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:18,223 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741851_1034 (size=10347) 2024-11-24T08:30:18,297 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:18,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/164b79a885bb43eda3147462612e2295 2024-11-24T08:30:18,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/164b79a885bb43eda3147462612e2295 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295 2024-11-24T08:30:18,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295, entries=5, sequenceid=11, filesize=10.1 K 2024-11-24T08:30:18,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for ed518b858264204ec9d728e5a6b90ada in 458ms, sequenceid=11, compaction requested=false 2024-11-24T08:30:18,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45841 {}] regionserver.HRegion(8855): Flush requested on ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:18,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ed518b858264204ec9d728e5a6b90ada 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-24T08:30:18,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/0a17bda815984f9484f14e731bfbc32a is 1080, key is row0007/info:/1732437018191/Put/seqid=0 2024-11-24T08:30:18,825 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:18,825 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:18,825 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741852_1035 2024-11-24T08:30:18,826 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:18,827 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,828 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:18,828 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741853_1036 2024-11-24T08:30:18,828 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:18,830 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34653 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:18,831 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:18,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37900 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741854_1037 to mirror 127.0.0.1:34653 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:18,831 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741854_1037 2024-11-24T08:30:18,831 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37900 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:18,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37900 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37900 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:18,831 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:18,832 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:18,833 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 
2024-11-24T08:30:18,833 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741855_1038 2024-11-24T08:30:18,833 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:18,834 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:18,834 WARN [IPC Server handler 1 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:18,834 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:18,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741856_1039 (size=12506) 2024-11-24T08:30:18,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/0a17bda815984f9484f14e731bfbc32a 2024-11-24T08:30:18,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/0a17bda815984f9484f14e731bfbc32a as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a 2024-11-24T08:30:18,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a, entries=7, sequenceid=24, filesize=12.2 K 2024-11-24T08:30:18,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for ed518b858264204ec9d728e5a6b90ada in 37ms, sequenceid=24, compaction requested=false 2024-11-24T08:30:18,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:18,854 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-24T08:30:18,854 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:18,855 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a because midkey is the same as first or last row 2024-11-24T08:30:19,420 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,189 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,189 WARN [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]] 2024-11-24T08:30:20,189 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C45841%2C1732437000220:(num 1732437018160) roll requested 2024-11-24T08:30:20,190 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.1732437020189 2024-11-24T08:30:20,193 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,193 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:20,193 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741857_1040 2024-11-24T08:30:20,194 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:20,195 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,196 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:20,196 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741858_1041 2024-11-24T08:30:20,196 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:20,198 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,198 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:20,198 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741859_1042 2024-11-24T08:30:20,199 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:20,200 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,200 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 
2024-11-24T08:30:20,200 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741860_1043 2024-11-24T08:30:20,201 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:20,202 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:20,202 WARN [IPC Server handler 1 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:20,202 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:20,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:20,210 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:20,210 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:20,210 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:20,210 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:20,211 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437018160 with entries=21, filesize=20.81 KB; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437020189 2024-11-24T08:30:20,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741846_1029 (size=21316) 2024-11-24T08:30:20,218 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40801:40801)] 2024-11-24T08:30:20,218 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it next time 2024-11-24T08:30:20,218 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437018160 is not closed yet, 
will try archiving it next time 2024-11-24T08:30:20,220 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437012133 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs/30c28c82771d%2C45841%2C1732437000220.1732437012133 2024-11-24T08:30:20,223 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437016142 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs/30c28c82771d%2C45841%2C1732437000220.1732437016142 2024-11-24T08:30:20,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45841 {}] regionserver.HRegion(8855): Flush requested on ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:20,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ed518b858264204ec9d728e5a6b90ada 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:30:20,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/415defab429f4b29a2e0de1090cac805 is 1079, key is tmprow/info:/1732437020236/Put/seqid=0 2024-11-24T08:30:20,251 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,251 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 
2024-11-24T08:30:20,252 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741862_1045 2024-11-24T08:30:20,253 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:20,254 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,254 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:20,254 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741863_1046 2024-11-24T08:30:20,255 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:20,256 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:20,257 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:20,257 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741864_1047 2024-11-24T08:30:20,257 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:20,262 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37932 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741865_1048 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:20,262 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:20,262 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741865_1048 2024-11-24T08:30:20,262 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37932 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:20,263 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:37932 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37932 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:20,263 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:20,264 WARN [IPC Server handler 2 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:20,264 WARN [IPC Server handler 2 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:20,264 WARN [IPC Server handler 2 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:20,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741866_1049 (size=6027) 2024-11-24T08:30:20,298 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:20,614 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 is not closed yet, will try archiving it next time 2024-11-24T08:30:20,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/415defab429f4b29a2e0de1090cac805 2024-11-24T08:30:20,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/415defab429f4b29a2e0de1090cac805 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/415defab429f4b29a2e0de1090cac805 2024-11-24T08:30:20,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/415defab429f4b29a2e0de1090cac805, entries=1, sequenceid=34, filesize=5.9 K 2024-11-24T08:30:20,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ed518b858264204ec9d728e5a6b90ada in 452ms, sequenceid=34, compaction requested=true 2024-11-24T08:30:20,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-24T08:30:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a because midkey is the same as first or last row 2024-11-24T08:30:20,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed518b858264204ec9d728e5a6b90ada:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:30:20,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:30:20,691 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:30:20,693 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:30:20,693 DEBUG 
[RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HStore(1541): ed518b858264204ec9d728e5a6b90ada/info is initiating minor compaction (all files) 2024-11-24T08:30:20,693 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ed518b858264204ec9d728e5a6b90ada/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:20,693 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/415defab429f4b29a2e0de1090cac805] into tmpdir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp, totalSize=28.2 K 2024-11-24T08:30:20,694 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.Compactor(225): Compacting 164b79a885bb43eda3147462612e2295, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732437014149 2024-11-24T08:30:20,694 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0a17bda815984f9484f14e731bfbc32a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732437018191 2024-11-24T08:30:20,695 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.Compactor(225): Compacting 415defab429f4b29a2e0de1090cac805, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732437020236 2024-11-24T08:30:20,712 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed518b858264204ec9d728e5a6b90ada#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:30:20,712 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/ef49d752d02145a88893f7a4c7f26e11 is 1080, key is row0002/info:/1732437014149/Put/seqid=0 2024-11-24T08:30:20,714 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,714 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:20,714 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741867_1050 2024-11-24T08:30:20,715 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:20,717 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41784 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741868_1051 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:20,717 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,717 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41784 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:20,717 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:20,717 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741868_1051 2024-11-24T08:30:20,717 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41784 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41784 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:20,718 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:20,719 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,720 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:20,720 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741869_1052 2024-11-24T08:30:20,720 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:20,722 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41800 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741870_1053 to mirror 127.0.0.1:34653 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:20,722 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34653 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:20,722 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41800 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:20,723 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:20,723 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741870_1053 2024-11-24T08:30:20,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41800 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41800 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:20,723 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:20,724 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:20,724 WARN [IPC Server handler 1 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:20,724 WARN [IPC Server handler 1 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:20,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741871_1054 (size=17994) 2024-11-24T08:30:21,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40a823e5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741841_1024 to 127.0.0.1:33711 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
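The BlockPlacementPolicyDefault warnings just above ask for DEBUG logging on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology to see why the NameNode cannot find a second DISK target. A minimal sketch of turning those loggers on programmatically with the Log4j2 Configurator API already loaded by this run; the logger names are taken verbatim from the warning, the class name here is illustrative.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Minimal sketch: raise the two loggers named in the warning above to DEBUG
// so the placement decisions and topology choices get written out.
public class EnablePlacementDebug {
    public static void main(String[] args) {
        Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}

The same effect could be had by editing the log4j2 properties used by the test, but the programmatic form is handy when only one noisy stretch of a run needs the extra detail.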
2024-11-24T08:30:21,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e6fd580[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741851_1034 to 127.0.0.1:34653 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,134 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/ef49d752d02145a88893f7a4c7f26e11 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 2024-11-24T08:30:21,142 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ed518b858264204ec9d728e5a6b90ada/info of ed518b858264204ec9d728e5a6b90ada into ef49d752d02145a88893f7a4c7f26e11(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
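The HRegionFileSystem(442) entry above shows the compacted HFile being produced under .tmp and only then committed into the info store directory. A minimal sketch of that write-then-rename commit pattern using the Hadoop FileSystem API; the paths and payload are illustrative placeholders, not the test's real files.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the ".tmp then rename" commit visible above.
public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile   = new Path("/table/region/.tmp/compacted-hfile");
        Path storeFile = new Path("/table/region/info/compacted-hfile");

        // The new file is fully written under .tmp first ...
        try (FSDataOutputStream out = fs.create(tmpFile, /* overwrite = */ true)) {
            out.writeUTF("placeholder payload");
        }
        // ... and then moved into the store directory in a single rename,
        // so readers never observe a half-written store file.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("commit failed for " + storeFile);
        }
    }
}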
2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:21,142 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada., storeName=ed518b858264204ec9d728e5a6b90ada/info, priority=13, startTime=1732437020690; duration=0sec 2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 because midkey is the same as first or last row 2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:21,142 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 because midkey is the same as first or last row 2024-11-24T08:30:21,143 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T08:30:21,143 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:21,143 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 because midkey is the same as first or last row 2024-11-24T08:30:21,143 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:30:21,143 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed518b858264204ec9d728e5a6b90ada:info 2024-11-24T08:30:21,421 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:21,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45841 {}] regionserver.HRegion(8855): Flush requested on ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:21,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ed518b858264204ec9d728e5a6b90ada 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:30:21,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/f76e5a64d21d4c299b9ed6ed42244d61 is 1079, key is tmprow/info:/1732437021656/Put/seqid=0 2024-11-24T08:30:21,664 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:21,664 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]) is bad. 2024-11-24T08:30:21,664 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741872_1055 2024-11-24T08:30:21,665 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44185,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK] 2024-11-24T08:30:21,668 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34653 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:21,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41814 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741873_1056 to mirror 127.0.0.1:34653 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,668 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:21,668 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741873_1056 2024-11-24T08:30:21,668 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41814 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:21,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41814 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41814 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,668 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:21,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41822 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741874_1057 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,671 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:21,671 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41822 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:21,671 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:21,671 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741874_1057 2024-11-24T08:30:21,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41822 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41822 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,671 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:21,673 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33711 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:21,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41828 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741875_1058 to mirror 127.0.0.1:33711 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,673 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:21,673 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741875_1058 2024-11-24T08:30:21,673 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41828 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:21,674 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:41828 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41828 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:21,674 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:21,675 WARN [IPC Server handler 0 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T08:30:21,675 WARN [IPC Server handler 0 on default port 45383 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T08:30:21,675 WARN [IPC Server handler 0 on default port 45383 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T08:30:21,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741876_1059 (size=6027) 2024-11-24T08:30:21,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/f76e5a64d21d4c299b9ed6ed42244d61 2024-11-24T08:30:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/f76e5a64d21d4c299b9ed6ed42244d61 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/f76e5a64d21d4c299b9ed6ed42244d61 2024-11-24T08:30:21,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/f76e5a64d21d4c299b9ed6ed42244d61, entries=1, sequenceid=45, filesize=5.9 K 2024-11-24T08:30:21,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ed518b858264204ec9d728e5a6b90ada in 41ms, sequenceid=45, compaction requested=false 2024-11-24T08:30:21,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:21,699 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-24T08:30:21,699 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:21,699 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 because midkey is the same as first or last row 2024-11-24T08:30:22,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e6fd580[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741856_1039 to 127.0.0.1:38937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:22,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40a823e5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741846_1029 to 127.0.0.1:38937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:22,220 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:22,221 WARN [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-24T08:30:22,273 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:22,276 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:22,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:22,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:22,277 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:30:22,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@89ed911{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:22,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10460b4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:22,298 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:22,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e14b80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/java.io.tmpdir/jetty-localhost-39113-hadoop-hdfs-3_4_1-tests_jar-_-any-16611105158511453872/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:22,396 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@661cd0bd{HTTP/1.1, (http/1.1)}{localhost:39113} 2024-11-24T08:30:22,396 INFO [Time-limited test {}] server.Server(415): Started @128519ms 2024-11-24T08:30:22,397 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:30:22,514 WARN [Thread-977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:30:22,524 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6278c08e282496e with lease ID 0x9f07083e61bc9299: from storage DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69 node DatanodeRegistration(127.0.0.1:40119, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=35021, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:30:22,524 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6278c08e282496e with lease ID 0x9f07083e61bc9299: from storage DS-b5f0f57c-56ab-4193-b307-d8708a36ce0f node DatanodeRegistration(127.0.0.1:40119, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=35021, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:23,421 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
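From 08:30:21 onwards the log roller keeps hitting the same "All datanodes ... are bad" IOException and simply requests the roll again until the datanode restarted at 08:30:22 has reported its blocks back in. A minimal sketch of that retry-with-backoff shape in plain Java; WalWriterFactory is a hypothetical stand-in, not HBase's FSHLog code.

import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the roll-retry loop suggested by the repeated FSHLog(580) entries.
public class RollRetrySketch {
    interface WalWriterFactory {
        // May throw e.g. "java.io.IOException: All datanodes [...] are bad. Aborting..."
        AutoCloseable createWriter() throws IOException;
    }

    static AutoCloseable rollWithRetry(WalWriterFactory factory, int maxAttempts) throws Exception {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return factory.createWriter();      // roll succeeded, new WAL file is live
            } catch (IOException e) {
                last = e;                           // pipeline still cannot be built
                TimeUnit.SECONDS.sleep(1L);         // back off before asking for a new block
            }
        }
        throw last != null ? last : new IOException("maxAttempts must be >= 1");
    }
}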
2024-11-24T08:30:24,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40a823e5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741866_1049 to 127.0.0.1:38937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:24,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e6fd580[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33917, datanodeUuid=948161ed-05be-4a7c-a49c-c6650364918f, infoPort=40801, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741871_1054 to 127.0.0.1:38937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:24,221 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:24,299 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:25,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741876_1059 (size=6027) 2024-11-24T08:30:25,421 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:26,221 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:26,299 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:27,422 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:28,222 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:28,299 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:29,422 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,137 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
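The FsDatasetAsyncDiskServiceFixer line above is a reflective lookup in HBaseTestingUtil failing because newer Hadoop no longer exposes the private threadGroup field it reaches for (HBASE-27595), so the fixer logs the NoSuchFieldException and moves on. A minimal, self-contained sketch of that failure mode; the two stand-in classes below are illustrative, not Hadoop's.

import java.lang.reflect.Field;

// Minimal sketch: a reflective field lookup that degrades gracefully when the
// field has been dropped or renamed in a newer library version.
public class ReflectiveFieldSketch {
    /** Stand-in for the older service class that still had the field. */
    static class OldService { private Object threadGroup; }
    /** Stand-in for the newer version where the field is gone. */
    static class NewService { }

    static Field findField(Class<?> clazz, String name) {
        try {
            Field f = clazz.getDeclaredField(name);
            f.setAccessible(true);
            return f;
        } catch (NoSuchFieldException e) {
            // Field removed upstream: log and skip, as the fixer above does.
            System.out.println("NoSuchFieldException: " + name + "; skipping fixer for " + clazz.getSimpleName());
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(findField(OldService.class, "threadGroup")); // found
        System.out.println(findField(NewService.class, "threadGroup")); // null, logged
    }
}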
2024-11-24T08:30:30,222 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,300 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,398 ERROR [FSHLog-0-hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData-prefix:30c28c82771d,35659,1732437000158 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,398 WARN [FSHLog-0-hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData-prefix:30c28c82771d,35659,1732437000158 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,398 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C35659%2C1732437000158:(num 1732437000316) roll requested 2024-11-24T08:30:30,399 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C35659%2C1732437000158.1732437030399 2024-11-24T08:30:30,403 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:41852 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741877_1060 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:30,403 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:30,403 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741877_1060 2024-11-24T08:30:30,403 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:41852 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:30:30,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:41852 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41852 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:30,403 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:30,405 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:30,405 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:30,405 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741878_1061 2024-11-24T08:30:30,405 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:30,409 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:30,409 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:30,410 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:30,410 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:30,410 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:30,410 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437030399 2024-11-24T08:30:30,410 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:30,410 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:30,411 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 2024-11-24T08:30:30,411 WARN [IPC Server handler 2 on default port 45383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-11-24T08:30:30,411 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 after 0ms 2024-11-24T08:30:30,414 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40801:40801),(127.0.0.1/127.0.0.1:35021:35021)] 2024-11-24T08:30:30,414 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 is not closed yet, will try archiving it next time 2024-11-24T08:30:31,423 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:32,223 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
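The RecoverLeaseFSUtils entries above show HBase asking the NameNode to recover the lease on the old master WAL so the file can eventually be closed and archived (attempt=0 above, attempt=1 roughly four seconds later below, while block recovery RecoveryId=1063 is still pending). As a rough illustration of the underlying mechanism, not of HBase's own RecoverLeaseFSUtils code, lease recovery can be driven through the public DistributedFileSystem API as sketched here; the NameNode URI, file path and retry interval are placeholders.

    // Rough sketch of HDFS lease recovery via the public client API.
    // The URI, path and retry interval are placeholders, not taken from this test.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        public static boolean recover(String nameNodeUri, String file) throws Exception {
            Configuration conf = new Configuration();
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(URI.create(nameNodeUri), conf);
            Path path = new Path(file);
            // recoverLease() returns true once the file is closed; otherwise retry,
            // since the NameNode may still be waiting on block recovery.
            for (int attempt = 0; attempt < 10; attempt++) {
                if (dfs.recoverLease(path)) {
                    return true;
                }
                Thread.sleep(4000L); // roughly the spacing between attempts seen in this log
            }
            return dfs.isFileClosed(path);
        }
    }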
2024-11-24T08:30:32,542 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@ee6f638 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:34653,null,null]) java.net.ConnectException: Call From 30c28c82771d/172.17.0.2 to localhost:46383 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T08:30:32,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741833_1019 (size=455) 2024-11-24T08:30:33,164 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437000642 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs/30c28c82771d%2C45841%2C1732437000220.1732437000642 2024-11-24T08:30:33,166 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437018160 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs/30c28c82771d%2C45841%2C1732437000220.1732437018160 2024-11-24T08:30:33,423 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:34,223 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:34,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/WALs/30c28c82771d,35659,1732437000158/30c28c82771d%2C35659%2C1732437000158.1732437000316 after 4001ms 2024-11-24T08:30:35,423 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:36,223 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:36,518 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fa27928[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40119, datanodeUuid=5ec8d436-8ea8-4223-9061-288fdef1d878, infoPort=35021, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=1872310701;c=1732436999291):Failed to transfer BP-785019810-172.17.0.2-1732436999291:blk_1073741833_1019 to 127.0.0.1:38937 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:37,424 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:37,855 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.1732437037855 2024-11-24T08:30:37,859 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:37,859 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:54844 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741880_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741880_1064 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:37,860 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 
2024-11-24T08:30:37,860 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:54844 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741880_1064] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T08:30:37,860 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741880_1064 2024-11-24T08:30:37,860 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1018167684_22 at /127.0.0.1:54844 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741880_1064] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54844 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:37,860 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:37,864 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:37,865 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:37,865 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:37,865 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:37,865 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:37,865 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437020189 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437037855 2024-11-24T08:30:37,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741861_1044 (size=13591) 2024-11-24T08:30:37,867 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35021:35021),(127.0.0.1/127.0.0.1:40801:40801)] 2024-11-24T08:30:37,867 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437020189 is not closed yet, will try archiving it next time 2024-11-24T08:30:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45841 {}] regionserver.HRegion(8855): Flush requested on ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:37,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ed518b858264204ec9d728e5a6b90ada 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:30:37,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/2b26f08b87774d1f8989f6b863f49352 is 1080, key is row0013/info:/1732437037868/Put/seqid=0 2024-11-24T08:30:37,884 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:37,884 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:37,884 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741882_1066 2024-11-24T08:30:37,885 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:37,886 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:37,886 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:40119,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:37,886 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741883_1067 2024-11-24T08:30:37,886 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:37,889 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:37,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54850 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741884_1068 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:37,889 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:37,889 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741884_1068 2024-11-24T08:30:37,889 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54850 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:37,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54850 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54850 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:37,889 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:37,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741885_1069 (size=11421) 2024-11-24T08:30:37,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741885_1069 (size=11421) 2024-11-24T08:30:37,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/2b26f08b87774d1f8989f6b863f49352 2024-11-24T08:30:37,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/2b26f08b87774d1f8989f6b863f49352 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/2b26f08b87774d1f8989f6b863f49352 2024-11-24T08:30:37,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/2b26f08b87774d1f8989f6b863f49352, entries=6, sequenceid=55, filesize=11.2 K 2024-11-24T08:30:37,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for ed518b858264204ec9d728e5a6b90ada in 34ms, sequenceid=55, compaction requested=true 2024-11-24T08:30:37,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:37,912 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-24T08:30:37,912 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:37,912 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 because midkey is the same as first or last row 2024-11-24T08:30:37,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store ed518b858264204ec9d728e5a6b90ada:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:30:37,912 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:30:37,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:30:37,913 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:30:37,914 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HStore(1541): ed518b858264204ec9d728e5a6b90ada/info is initiating minor compaction (all files) 2024-11-24T08:30:37,914 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ed518b858264204ec9d728e5a6b90ada/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:37,914 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/f76e5a64d21d4c299b9ed6ed42244d61, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/2b26f08b87774d1f8989f6b863f49352] into tmpdir=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp, totalSize=34.6 K 2024-11-24T08:30:37,914 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef49d752d02145a88893f7a4c7f26e11, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732437014149 2024-11-24T08:30:37,915 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.Compactor(225): Compacting f76e5a64d21d4c299b9ed6ed42244d61, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732437021656 2024-11-24T08:30:37,915 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b26f08b87774d1f8989f6b863f49352, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732437022062 2024-11-24T08:30:37,935 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed518b858264204ec9d728e5a6b90ada#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:30:37,936 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/3af2e63e9a32408eba3ea0e8a27f9399 is 1080, key is row0002/info:/1732437014149/Put/seqid=0 2024-11-24T08:30:37,939 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:37,939 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:37,939 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741886_1070 2024-11-24T08:30:37,940 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:37,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60326 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741887_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4]'}, localName='127.0.0.1:40119', datanodeUuid='5ec8d436-8ea8-4223-9061-288fdef1d878', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741887_1071 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:37,943 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:37,943 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40119,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:37,943 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60326 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741887_1071] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:37,943 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741887_1071 2024-11-24T08:30:37,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60326 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741887_1071] {}] datanode.DataXceiver(331): 127.0.0.1:40119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60326 dst: /127.0.0.1:40119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:37,944 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:37,945 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:37,945 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:37,946 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741888_1072 2024-11-24T08:30:37,946 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741889_1073 (size=23502) 2024-11-24T08:30:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741889_1073 (size=23502) 2024-11-24T08:30:37,964 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/3af2e63e9a32408eba3ea0e8a27f9399 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/3af2e63e9a32408eba3ea0e8a27f9399 2024-11-24T08:30:37,971 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ed518b858264204ec9d728e5a6b90ada/info of ed518b858264204ec9d728e5a6b90ada into 3af2e63e9a32408eba3ea0e8a27f9399(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
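The flush and compaction entries around this point are triggered automatically by the region server (MemStoreFlusher and CompactSplit); nothing in the test issues them by hand. For reference only, the same operations can be requested explicitly through the client Admin API; the sketch below assumes a reachable cluster configuration and reuses the table name from this test.

    // Illustrative only: requesting a flush and compaction through the Admin API.
    // The cluster configuration is assumed to point at a running HBase instance.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushCompactSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                admin.flush(table);   // flush memstores, as MemStoreFlusher does in the entries above
                admin.compact(table); // request a compaction of the flushed store files
            }
        }
    }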
2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:37,971 INFO [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada., storeName=ed518b858264204ec9d728e5a6b90ada/info, priority=13, startTime=1732437037912; duration=0sec 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/3af2e63e9a32408eba3ea0e8a27f9399 because midkey is the same as first or last row 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/3af2e63e9a32408eba3ea0e8a27f9399 because midkey is the same as first or last row 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/3af2e63e9a32408eba3ea0e8a27f9399 because midkey is the same as first or last row 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:30:37,971 DEBUG [RS:0;30c28c82771d:45841-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed518b858264204ec9d728e5a6b90ada:info 2024-11-24T08:30:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45841 {}] regionserver.HRegion(8855): Flush requested on ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:38,091 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ed518b858264204ec9d728e5a6b90ada 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T08:30:38,098 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/feff1bde758e4920a9ff802e6702b9f3 is 1080, key is row0018/info:/1732437037878/Put/seqid=0 2024-11-24T08:30:38,100 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33711 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60346 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741890_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4]'}, localName='127.0.0.1:40119', datanodeUuid='5ec8d436-8ea8-4223-9061-288fdef1d878', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741890_1074 to mirror 127.0.0.1:33711 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,100 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40119,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 
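The split-policy checks logged between the compaction above and this flush make the decision explicit: the single remaining store file totals 23.0 K against a sizeToCheck of 16.0 K, so the size test passes, but StoreUtils still reports "cannot split" because the file's midkey equals its first or last row, leaving no usable split point. A rough sketch of that arithmetic in plain Java (not the actual HBase classes; the row keys below are hypothetical):

import java.util.Arrays;

// Illustrative only: mirrors the size check (sumSize=23.0 K > sizeToCheck=16.0 K)
// and the midkey guard visible in the ConstantSizeRegionSplitPolicy/StoreUtils lines.
final class SplitCheckSketch {
  static boolean sizeSaysSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
    return sumStoreSizeBytes > sizeToCheckBytes; // 23.0 K > 16.0 K, so true here
  }

  static boolean hasUsableSplitPoint(byte[] midkey, byte[] firstRow, byte[] lastRow) {
    // "cannot split ... because midkey is the same as first or last row"
    return !(Arrays.equals(midkey, firstRow) || Arrays.equals(midkey, lastRow));
  }

  public static void main(String[] args) {
    long sumSize = 23L * 1024, sizeToCheck = 16L * 1024;
    byte[] mid = "rowA".getBytes(), first = "rowA".getBytes(), last = "rowZ".getBytes();
    System.out.println(sizeSaysSplit(sumSize, sizeToCheck));    // true
    System.out.println(hasUsableSplitPoint(mid, first, last));  // false, so no split
  }
}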
2024-11-24T08:30:38,101 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741890_1074 2024-11-24T08:30:38,101 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60346 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741890_1074] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60346 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741890_1074] {}] datanode.DataXceiver(331): 127.0.0.1:40119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60346 dst: /127.0.0.1:40119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:38,101 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741891_1075 (size=11421) 2024-11-24T08:30:38,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741891_1075 (size=11421) 2024-11-24T08:30:38,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/feff1bde758e4920a9ff802e6702b9f3 2024-11-24T08:30:38,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/.tmp/info/feff1bde758e4920a9ff802e6702b9f3 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/feff1bde758e4920a9ff802e6702b9f3 2024-11-24T08:30:38,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/feff1bde758e4920a9ff802e6702b9f3, entries=6, sequenceid=66, filesize=11.2 K 2024-11-24T08:30:38,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ed518b858264204ec9d728e5a6b90ada in 28ms, sequenceid=66, compaction requested=false 2024-11-24T08:30:38,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ed518b858264204ec9d728e5a6b90ada: 2024-11-24T08:30:38,120 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-24T08:30:38,120 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:30:38,120 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/3af2e63e9a32408eba3ea0e8a27f9399 because midkey is the same as first or last row 2024-11-24T08:30:38,224 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,224 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-24T08:30:38,268 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.1732437020189 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs/30c28c82771d%2C45841%2C1732437000220.1732437020189 2024-11-24T08:30:38,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:30:38,292 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:30:38,292 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:30:38,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:38,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:38,292 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:30:38,293 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1263878371, stopped=false 2024-11-24T08:30:38,292 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T08:30:38,293 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,35659,1732437000158 2024-11-24T08:30:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:38,294 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:30:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:38,295 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
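The call stack above pins down where the shutdown starts: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection, stops the RPC client and hands the cluster to JVMClusterUtil for shutdown. A sketch of the test scaffolding this implies is below; only shutdownMiniCluster() and the HBaseTestingUtil class name are taken from the trace, while the field name, the @Before body and the assumption that startMiniCluster() is the matching setup call are not from this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Sketch of the JUnit lifecycle implied by the tearDown stack trace above.
public abstract class MiniClusterLifecycleSketch {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // assumed counterpart: brings up ZK, HDFS and HBase in-process
  }

  @After
  public void tearDown() throws Exception {
    // Matches the trace: shutdownMiniCluster -> shutdownMiniHBaseCluster -> cleanup/closeConnection
    TEST_UTIL.shutdownMiniCluster();
  }
}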
2024-11-24T08:30:38,295 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:30:38,295 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:38,295 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,45841,1732437000220' ***** 2024-11-24T08:30:38,295 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:30:38,295 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,34635,1732437001369' ***** 2024-11-24T08:30:38,295 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:30:38,295 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:30:38,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:38,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:38,296 INFO [RS:1;30c28c82771d:34635 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:30:38,296 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:30:38,296 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:30:38,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:38,296 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:30:38,296 INFO [RS:0;30c28c82771d:45841 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:30:38,296 INFO [RS:0;30c28c82771d:45841 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:30:38,296 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(3091): Received CLOSE for ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:38,297 INFO [RS:1;30c28c82771d:34635 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:30:38,297 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,34635,1732437001369 2024-11-24T08:30:38,297 INFO [RS:1;30c28c82771d:34635 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:30:38,297 INFO [RS:1;30c28c82771d:34635 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;30c28c82771d:34635. 
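The ZKWatcher lines show the actual shutdown signal: deleting the /hbase/running znode fans a NodeDeleted event out to the master and both region servers, each of which re-sets a watch on the now-absent node and begins stopping. Underneath HBase's ZKWatcher wrapper this is the plain ZooKeeper existence-watch pattern, roughly as below; the quorum address is the one in this log, the rest is an assumption rather than HBase code.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Bare-bones sketch of the znode watch behind the "/hbase/running" events above.
public class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:65078", 30_000, event -> { });
    Watcher shutdownWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("cluster marked down, begin shutdown");
      }
    };
    // exists() registers the watch even when the node is absent, mirroring
    // "Set watcher on znode that does not yet exist, /hbase/running".
    zk.exists("/hbase/running", shutdownWatcher);
    Thread.sleep(5_000); // keep the session open long enough to observe an event
    zk.close();
  }
}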
2024-11-24T08:30:38,297 DEBUG [RS:1;30c28c82771d:34635 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:30:38,297 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,45841,1732437000220 2024-11-24T08:30:38,297 DEBUG [RS:1;30c28c82771d:34635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:38,297 INFO [RS:0;30c28c82771d:45841 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:30:38,297 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,34635,1732437001369; all regions closed. 2024-11-24T08:30:38,297 INFO [RS:0;30c28c82771d:45841 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:45841. 
2024-11-24T08:30:38,297 DEBUG [RS:0;30c28c82771d:45841 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:30:38,297 DEBUG [RS:0;30c28c82771d:45841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:38,297 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ed518b858264204ec9d728e5a6b90ada, disabling compactions & flushes 2024-11-24T08:30:38,297 INFO [RS:0;30c28c82771d:45841 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:30:38,297 INFO [RS:0;30c28c82771d:45841 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:30:38,297 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:38,297 INFO [RS:0;30c28c82771d:45841 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:30:38,297 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:38,298 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:30:38,298 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. after waiting 0 ms 2024-11-24T08:30:38,298 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,298 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 
2024-11-24T08:30:38,298 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,298 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,298 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:30:38,298 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1325): Online Regions={ed518b858264204ec9d728e5a6b90ada=TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:30:38,298 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,298 DEBUG [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ed518b858264204ec9d728e5a6b90ada 2024-11-24T08:30:38,298 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,298 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:30:38,298 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:30:38,298 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:30:38,298 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:30:38,298 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:30:38,298 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/415defab429f4b29a2e0de1090cac805, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/f76e5a64d21d4c299b9ed6ed42244d61, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/2b26f08b87774d1f8989f6b863f49352] to archive 2024-11-24T08:30:38,298 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-24T08:30:38,298 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): 
Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,299 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,299 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 2024-11-24T08:30:38,299 ERROR [FSHLog-0-hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40-prefix:30c28c82771d,45841,1732437000220.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,299 WARN [FSHLog-0-hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40-prefix:30c28c82771d,45841,1732437000220.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,299 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C45841%2C1732437000220.meta:.meta(num 1732437001205) roll requested 2024-11-24T08:30:38,299 WARN [IPC Server handler 4 on default port 45383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741837_1013 2024-11-24T08:30:38,299 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T08:30:38,299 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C45841%2C1732437000220.meta.1732437038299.meta 2024-11-24T08:30:38,300 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 after 0ms 2024-11-24T08:30:38,301 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295 2024-11-24T08:30:38,302 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
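The RecoverLeaseFSUtils lines are the standard lease-recovery loop for an abandoned WAL: ask the namenode to recover the lease on the old file, get told "Lease recovery is in progress. RecoveryId = 1076", and retry until recoverLease() finally returns true (attempt=0 is reported as failed above and will be retried). Stripped of HBase's helper, the underlying HDFS call looks roughly like this; the path and retry budget are placeholders, and only DistributedFileSystem.recoverLease() is the real API in play.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the retry loop behind RecoverLeaseFSUtils: recoverLease() returns false
// while the namenode still has block recovery in progress, true once the file is closed.
public class LeaseRecoverySketch {
  public static boolean recover(Configuration conf, Path oldWal) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    for (int attempt = 0; attempt < 10; attempt++) {
      if (dfs.recoverLease(oldWal)) {
        return true;         // lease released, file length finalized, safe to split/replay
      }
      Thread.sleep(1000L);   // give the namenode's block recovery time to complete
    }
    return false;
  }
}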
2024-11-24T08:30:38,302 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741892_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK], DatanodeInfoWithStorage[127.0.0.1:40119,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:38,302 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741892_1077 2024-11-24T08:30:38,303 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:38,303 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/0a17bda815984f9484f14e731bfbc32a 2024-11-24T08:30:38,304 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,304 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 
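The HFileArchiver messages around this point show the archive layout: a compacted store file under .../data/<namespace>/<table>/<region>/<family>/ is moved to the same relative location under .../archive/. A small sketch of that path mirroring, using the root directory and one file name from this log (the helper itself is illustrative, not the HFileArchiver implementation):

import org.apache.hadoop.fs.Path;

// Illustration of the data/ -> archive/data/ mirroring in the messages above.
public class ArchivePathSketch {
  public static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRollOnDatanodeDeath/"
            + "ed518b858264204ec9d728e5a6b90ada/info/164b79a885bb43eda3147462612e2295");
    System.out.println(toArchivePath(root, hfile));
    // prints .../archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/.../info/164b...
  }
}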
2024-11-24T08:30:38,304 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741893_1078 2024-11-24T08:30:38,304 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:38,305 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/ef49d752d02145a88893f7a4c7f26e11 2024-11-24T08:30:38,306 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/415defab429f4b29a2e0de1090cac805 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/415defab429f4b29a2e0de1090cac805 2024-11-24T08:30:38,308 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/f76e5a64d21d4c299b9ed6ed42244d61 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/f76e5a64d21d4c299b9ed6ed42244d61 2024-11-24T08:30:38,309 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/2b26f08b87774d1f8989f6b863f49352 to hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/info/2b26f08b87774d1f8989f6b863f49352 2024-11-24T08:30:38,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,309 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,309 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,309 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,309 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30c28c82771d:35659 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-24T08:30:38,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:38,310 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [164b79a885bb43eda3147462612e2295=10347, 0a17bda815984f9484f14e731bfbc32a=12506, ef49d752d02145a88893f7a4c7f26e11=17994, 415defab429f4b29a2e0de1090cac805=6027, f76e5a64d21d4c299b9ed6ed42244d61=6027, 2b26f08b87774d1f8989f6b863f49352=11421] 2024-11-24T08:30:38,310 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437038299.meta 2024-11-24T08:30:38,310 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,310 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
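The roll requested at 08:30:38,299 completes just above: the old meta WAL is rolled with entries=8, filesize=2.33 KB, even though closing the old writer failed non-fatally because all of its pipeline datanodes are gone. Rolls like this are driven internally by the log roller (the LowReplication-Roller noted earlier), but the same effect can be requested through the Admin API; a hedged sketch follows, with the server name format taken from the WAL directory names in this log.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: explicit WAL roll for one region server via the client Admin API.
public class WalRollSketch {
  public static void rollWal(String serverName) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // e.g. "30c28c82771d,45841,1732437000220", the host,port,startcode form seen above
      admin.rollWALWriter(ServerName.valueOf(serverName));
    }
  }
}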
2024-11-24T08:30:38,310 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta 2024-11-24T08:30:38,311 WARN [IPC Server handler 4 on default port 45383 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741834_1010 2024-11-24T08:30:38,311 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta after 1ms 2024-11-24T08:30:38,313 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40801:40801),(127.0.0.1/127.0.0.1:35021:35021)] 2024-11-24T08:30:38,313 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta is not closed yet, will try archiving it next time 2024-11-24T08:30:38,315 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ed518b858264204ec9d728e5a6b90ada/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-24T08:30:38,315 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 2024-11-24T08:30:38,315 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ed518b858264204ec9d728e5a6b90ada: Waiting for close lock at 1732437038297Running coprocessor pre-close hooks at 1732437038297Disabling compacts and flushes for region at 1732437038297Disabling writes for close at 1732437038298 (+1 ms)Writing region close event to WAL at 1732437038310 (+12 ms)Running coprocessor post-close hooks at 1732437038315 (+5 ms)Closed at 1732437038315 2024-11-24T08:30:38,315 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada. 
2024-11-24T08:30:38,328 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/info/b56c179795f945f1b837b20986aac1ef is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732437001454.ed518b858264204ec9d728e5a6b90ada./info:regioninfo/1732437001824/Put/seqid=0 2024-11-24T08:30:38,330 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,330 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741895_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:38,330 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741895_1081 2024-11-24T08:30:38,331 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:38,332 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:30:38,332 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741896_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:38,332 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741896_1082 2024-11-24T08:30:38,333 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:38,337 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34653 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,337 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60374 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741897_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4]'}, localName='127.0.0.1:40119', datanodeUuid='5ec8d436-8ea8-4223-9061-288fdef1d878', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741897_1083 to mirror 127.0.0.1:34653 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:38,337 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40119,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:38,337 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741897_1083 2024-11-24T08:30:38,337 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60374 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741897_1083] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60374 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741897_1083] {}] datanode.DataXceiver(331): 127.0.0.1:40119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60374 dst: /127.0.0.1:40119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:38,338 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:38,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741898_1084 (size=7089) 2024-11-24T08:30:38,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741898_1084 (size=7089) 2024-11-24T08:30:38,343 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/info/b56c179795f945f1b837b20986aac1ef 2024-11-24T08:30:38,364 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/ns/96cd70a067394b139c7c2772c02901af is 43, key is default/ns:d/1732437001253/Put/seqid=0 2024-11-24T08:30:38,367 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60400 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4]'}, localName='127.0.0.1:40119', datanodeUuid='5ec8d436-8ea8-4223-9061-288fdef1d878', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741899_1085 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,367 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,367 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60400 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,367 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40119,DS-b0f9a4a2-5307-418e-9eda-cf52acff8b69,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:38,367 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741899_1085 2024-11-24T08:30:38,368 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:60400 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:40119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60400 dst: /127.0.0.1:40119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,368 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:38,370 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34653 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,370 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54922 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741900_1086 to mirror 127.0.0.1:34653 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,370 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:38,370 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741900_1086 2024-11-24T08:30:38,371 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54922 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,371 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54922 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54922 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,371 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:38,373 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33711 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54936 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741901_1087 to mirror 127.0.0.1:33711 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,373 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK]) is bad. 2024-11-24T08:30:38,373 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741901_1087 2024-11-24T08:30:38,373 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54936 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54936 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54936 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:38,374 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33711,DS-650d37d5-2847-4e3d-978c-d2ef36919183,DISK] 2024-11-24T08:30:38,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741902_1088 (size=5153) 2024-11-24T08:30:38,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741902_1088 (size=5153) 2024-11-24T08:30:38,379 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/ns/96cd70a067394b139c7c2772c02901af 2024-11-24T08:30:38,400 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/table/a8ae7d2066164a5d984095f7ad5f767e is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732437001835/Put/seqid=0 2024-11-24T08:30:38,402 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38937 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54946 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741903_1089 to mirror 127.0.0.1:38937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,402 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK]) is bad. 2024-11-24T08:30:38,403 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741903_1089 2024-11-24T08:30:38,403 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54946 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54946 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54946 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,403 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38937,DS-6a2b6d8f-c2ed-47cf-8e46-abdf142f9079,DISK] 2024-11-24T08:30:38,405 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34653 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:38,405 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54962 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741904_1090] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10]'}, localName='127.0.0.1:33917', datanodeUuid='948161ed-05be-4a7c-a49c-c6650364918f', xmitsInProgress=0}:Exception transferring block BP-785019810-172.17.0.2-1732436999291:blk_1073741904_1090 to mirror 127.0.0.1:34653 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,406 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-785019810-172.17.0.2-1732436999291:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33917,DS-f644a837-8d57-4dba-ae2f-d63ab65f1939,DISK], DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK]) is bad. 2024-11-24T08:30:38,406 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-785019810-172.17.0.2-1732436999291:blk_1073741904_1090 2024-11-24T08:30:38,406 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54962 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741904_1090] {}] datanode.BlockReceiver(316): Block 1073741904 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T08:30:38,406 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1776818133_22 at /127.0.0.1:54962 [Receiving block BP-785019810-172.17.0.2-1732436999291:blk_1073741904_1090] {}] datanode.DataXceiver(331): 127.0.0.1:33917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54962 dst: /127.0.0.1:33917 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:38,406 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34653,DS-06b9a65f-3a28-4448-a53c-d825f9800a62,DISK] 2024-11-24T08:30:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741905_1091 (size=5424) 2024-11-24T08:30:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741905_1091 (size=5424) 2024-11-24T08:30:38,411 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/table/a8ae7d2066164a5d984095f7ad5f767e 2024-11-24T08:30:38,419 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/info/b56c179795f945f1b837b20986aac1ef as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/info/b56c179795f945f1b837b20986aac1ef 2024-11-24T08:30:38,419 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:30:38,419 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:30:38,425 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/info/b56c179795f945f1b837b20986aac1ef, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T08:30:38,426 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/ns/96cd70a067394b139c7c2772c02901af as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/ns/96cd70a067394b139c7c2772c02901af 2024-11-24T08:30:38,432 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/ns/96cd70a067394b139c7c2772c02901af, entries=2, sequenceid=11, filesize=5.0 K
2024-11-24T08:30:38,433 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/.tmp/table/a8ae7d2066164a5d984095f7ad5f767e as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/table/a8ae7d2066164a5d984095f7ad5f767e
2024-11-24T08:30:38,439 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/table/a8ae7d2066164a5d984095f7ad5f767e, entries=2, sequenceid=11, filesize=5.3 K
2024-11-24T08:30:38,440 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false
2024-11-24T08:30:38,447 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-24T08:30:38,447 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-24T08:30:38,448 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-24T08:30:38,448 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437038298Running coprocessor pre-close hooks at 1732437038298Disabling compacts and flushes for region at 1732437038298Disabling writes for close at 1732437038298Obtaining lock to block concurrent updates at 1732437038298Preparing flush snapshotting stores in 1588230740 at 1732437038298Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732437038299 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732437038313 (+14 ms)Flushing 1588230740/info: creating writer at 1732437038313Flushing 1588230740/info: appending metadata at 1732437038328 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732437038328Flushing 1588230740/ns: creating writer at 1732437038349 (+21 ms)Flushing 1588230740/ns: appending metadata at 1732437038364 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732437038364Flushing 1588230740/table: creating writer at 1732437038385 (+21 ms)Flushing 1588230740/table: appending metadata at 1732437038399 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732437038399Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@94c0551: reopening flushed file at 1732437038418 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3deeadac: reopening flushed file at 1732437038425 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bdbf7c4: reopening flushed file at 1732437038433 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1732437038440 (+7 ms)Writing region close event to WAL at 1732437038443 (+3 ms)Running coprocessor post-close hooks at 1732437038447 (+4 ms)Closed at 1732437038448 (+1 ms)
2024-11-24T08:30:38,448 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-24T08:30:38,480 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-24T08:30:38,480 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-24T08:30:38,480 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-24T08:30:38,498 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,45841,1732437000220; all regions closed.
2024-11-24T08:30:38,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:30:38,499 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:30:38,499 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:30:38,499 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:30:38,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T08:30:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741894_1079 (size=825)
2024-11-24T08:30:38,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741894_1079 (size=825)
2024-11-24T08:30:39,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741861_1044 (size=13591)
2024-11-24T08:30:39,420 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-24T08:30:39,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-11-24T08:30:39,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-24T08:30:39,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-24T08:30:41,771 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-24T08:30:41,771 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-24T08:30:42,300 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 after 4001ms
2024-11-24T08:30:42,312 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta after 4002ms
2024-11-24T08:30:42,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741836_1012 (size=76)
2024-11-24T08:30:42,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741832_1008 (size=32)
2024-11-24T08:30:42,546 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2f09a32e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-785019810-172.17.0.2-1732436999291:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:34653,null,null])
java.net.ConnectException: Call From 30c28c82771d/172.17.0.2 to localhost:46383 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?]
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T08:30:43,299 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T08:30:43,301 DEBUG [RS:1;30c28c82771d:34635 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C34635%2C1732437001369:(num 1732437001555) 2024-11-24T08:30:43,301 DEBUG [RS:1;30c28c82771d:34635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:30:43,301 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T08:30:43,301 INFO [RS:1;30c28c82771d:34635 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-24T08:30:43,302 INFO [RS:1;30c28c82771d:34635 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34635
2024-11-24T08:30:43,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,34635,1732437001369
2024-11-24T08:30:43,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-24T08:30:43,303 INFO [RS:1;30c28c82771d:34635 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-24T08:30:43,304 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,34635,1732437001369]
2024-11-24T08:30:43,306 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,34635,1732437001369 already deleted, retry=false
2024-11-24T08:30:43,306 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,34635,1732437001369 expired; onlineServers=1
2024-11-24T08:30:43,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:43,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:30:43,406 INFO [RS:1;30c28c82771d:34635 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:30:43,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34635-0x1014918aa5b0002, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:30:43,406 INFO [RS:1;30c28c82771d:34635 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,34635,1732437001369; zookeeper connection closed. 2024-11-24T08:30:43,406 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28809216 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28809216 2024-11-24T08:30:43,499 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T08:30:43,503 DEBUG [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs 2024-11-24T08:30:43,503 INFO [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C45841%2C1732437000220.meta:.meta(num 1732437038299) 2024-11-24T08:30:43,503 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,503 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,503 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,504 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,504 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741881_1065 (size=16308) 2024-11-24T08:30:43,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741881_1065 (size=16308) 2024-11-24T08:30:43,508 DEBUG [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/oldWALs 2024-11-24T08:30:43,508 INFO [RS:0;30c28c82771d:45841 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C45841%2C1732437000220:(num 1732437037855) 2024-11-24T08:30:43,508 DEBUG [RS:0;30c28c82771d:45841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:43,508 INFO [RS:0;30c28c82771d:45841 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:30:43,508 INFO [RS:0;30c28c82771d:45841 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:30:43,509 INFO [RS:0;30c28c82771d:45841 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T08:30:43,509 INFO [RS:0;30c28c82771d:45841 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:30:43,509 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:30:43,509 INFO [RS:0;30c28c82771d:45841 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45841 2024-11-24T08:30:43,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,45841,1732437000220 2024-11-24T08:30:43,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:30:43,511 INFO [RS:0;30c28c82771d:45841 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:30:43,512 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,45841,1732437000220] 2024-11-24T08:30:43,514 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,45841,1732437000220 already deleted, retry=false 2024-11-24T08:30:43,514 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,45841,1732437000220 expired; onlineServers=0 2024-11-24T08:30:43,514 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,35659,1732437000158' ***** 2024-11-24T08:30:43,514 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:30:43,515 INFO [M:0;30c28c82771d:35659 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:30:43,515 INFO [M:0;30c28c82771d:35659 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:30:43,515 DEBUG [M:0;30c28c82771d:35659 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:30:43,515 DEBUG [M:0;30c28c82771d:35659 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:30:43,515 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:30:43,515 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437000398 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437000398,5,FailOnTimeoutGroup] 2024-11-24T08:30:43,515 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437000399 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437000399,5,FailOnTimeoutGroup] 2024-11-24T08:30:43,515 INFO [M:0;30c28c82771d:35659 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:30:43,515 INFO [M:0;30c28c82771d:35659 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:30:43,515 DEBUG [M:0;30c28c82771d:35659 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:30:43,515 INFO [M:0;30c28c82771d:35659 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:30:43,516 INFO [M:0;30c28c82771d:35659 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:30:43,516 INFO [M:0;30c28c82771d:35659 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:30:43,516 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:30:43,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:30:43,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:43,517 DEBUG [M:0;30c28c82771d:35659 {}] zookeeper.ZKUtil(347): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:30:43,517 WARN [M:0;30c28c82771d:35659 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:30:43,517 INFO [M:0;30c28c82771d:35659 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/.lastflushedseqids 2024-11-24T08:30:43,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:30:43,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:30:43,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741906_1092 (size=130) 2024-11-24T08:30:43,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741906_1092 (size=130) 2024-11-24T08:30:43,525 INFO [M:0;30c28c82771d:35659 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:30:43,525 INFO [M:0;30c28c82771d:35659 {}] region.MasterRegion(195): Closing local region {ENCODED => 
1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:30:43,525 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:30:43,525 INFO [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:43,525 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:43,525 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:30:43,525 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:43,525 INFO [M:0;30c28c82771d:35659 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-24T08:30:43,543 DEBUG [M:0;30c28c82771d:35659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5bb53177fe04a30b323b38c97b48099 is 82, key is hbase:meta,,1/info:regioninfo/1732437001235/Put/seqid=0 2024-11-24T08:30:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741907_1093 (size=5672) 2024-11-24T08:30:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741907_1093 (size=5672) 2024-11-24T08:30:43,548 INFO [M:0;30c28c82771d:35659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5bb53177fe04a30b323b38c97b48099 2024-11-24T08:30:43,570 DEBUG [M:0;30c28c82771d:35659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33932ebd5b124fd7a3f74429ad70c632 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732437001840/Put/seqid=0 2024-11-24T08:30:43,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741908_1094 (size=6255) 2024-11-24T08:30:43,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741908_1094 (size=6255) 2024-11-24T08:30:43,575 INFO [M:0;30c28c82771d:35659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33932ebd5b124fd7a3f74429ad70c632 2024-11-24T08:30:43,581 INFO [M:0;30c28c82771d:35659 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33932ebd5b124fd7a3f74429ad70c632 2024-11-24T08:30:43,596 DEBUG 
[M:0;30c28c82771d:35659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/97aaa651ad6f4ed2a3591861ff0804c9 is 69, key is 30c28c82771d,34635,1732437001369/rs:state/1732437001405/Put/seqid=0 2024-11-24T08:30:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741909_1095 (size=5224) 2024-11-24T08:30:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741909_1095 (size=5224) 2024-11-24T08:30:43,601 INFO [M:0;30c28c82771d:35659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/97aaa651ad6f4ed2a3591861ff0804c9 2024-11-24T08:30:43,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:30:43,612 INFO [RS:0;30c28c82771d:45841 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:30:43,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45841-0x1014918aa5b0001, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:30:43,612 INFO [RS:0;30c28c82771d:45841 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,45841,1732437000220; zookeeper connection closed. 
2024-11-24T08:30:43,613 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@355be7ab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@355be7ab 2024-11-24T08:30:43,613 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-24T08:30:43,620 DEBUG [M:0;30c28c82771d:35659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/761b29af0c00406c8ef52555713157cb is 52, key is load_balancer_on/state:d/1732437001351/Put/seqid=0 2024-11-24T08:30:43,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741910_1096 (size=5056) 2024-11-24T08:30:43,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741910_1096 (size=5056) 2024-11-24T08:30:43,626 INFO [M:0;30c28c82771d:35659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/761b29af0c00406c8ef52555713157cb 2024-11-24T08:30:43,631 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5bb53177fe04a30b323b38c97b48099 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5bb53177fe04a30b323b38c97b48099 2024-11-24T08:30:43,637 INFO [M:0;30c28c82771d:35659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5bb53177fe04a30b323b38c97b48099, entries=8, sequenceid=60, filesize=5.5 K 2024-11-24T08:30:43,637 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33932ebd5b124fd7a3f74429ad70c632 as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33932ebd5b124fd7a3f74429ad70c632 2024-11-24T08:30:43,642 INFO [M:0;30c28c82771d:35659 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33932ebd5b124fd7a3f74429ad70c632 2024-11-24T08:30:43,642 INFO [M:0;30c28c82771d:35659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33932ebd5b124fd7a3f74429ad70c632, entries=6, sequenceid=60, filesize=6.1 K 2024-11-24T08:30:43,643 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/97aaa651ad6f4ed2a3591861ff0804c9 as 
hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/97aaa651ad6f4ed2a3591861ff0804c9 2024-11-24T08:30:43,648 INFO [M:0;30c28c82771d:35659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/97aaa651ad6f4ed2a3591861ff0804c9, entries=2, sequenceid=60, filesize=5.1 K 2024-11-24T08:30:43,649 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/761b29af0c00406c8ef52555713157cb as hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/761b29af0c00406c8ef52555713157cb 2024-11-24T08:30:43,653 INFO [M:0;30c28c82771d:35659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/761b29af0c00406c8ef52555713157cb, entries=1, sequenceid=60, filesize=4.9 K 2024-11-24T08:30:43,655 INFO [M:0;30c28c82771d:35659 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-11-24T08:30:43,656 INFO [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:43,656 DEBUG [M:0;30c28c82771d:35659 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437043525Disabling compacts and flushes for region at 1732437043525Disabling writes for close at 1732437043525Obtaining lock to block concurrent updates at 1732437043525Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732437043525Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732437043526 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732437043526Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732437043526Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732437043542 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732437043542Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732437043554 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732437043569 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732437043569Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732437043581 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732437043595 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732437043595Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732437043606 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732437043620 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732437043620Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cf35b6e: reopening flushed file at 1732437043631 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6eba7263: reopening flushed file at 1732437043637 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26f740dd: reopening flushed file at 1732437043642 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76f0cf23: reopening flushed file at 1732437043648 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1732437043655 (+7 ms)Writing region close event to WAL at 1732437043656 (+1 ms)Closed at 1732437043656 2024-11-24T08:30:43,657 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,657 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,657 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,657 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,657 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:43,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40119 is added to blk_1073741879_1062 (size=1045) 2024-11-24T08:30:43,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741879_1062 (size=1045) 2024-11-24T08:30:43,660 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:30:43,660 INFO [M:0;30c28c82771d:35659 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T08:30:43,660 INFO [M:0;30c28c82771d:35659 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35659 2024-11-24T08:30:43,660 INFO [M:0;30c28c82771d:35659 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:30:43,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:30:43,762 INFO [M:0;30c28c82771d:35659 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:30:43,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35659-0x1014918aa5b0000, quorum=127.0.0.1:65078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:30:43,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e14b80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:43,765 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661cd0bd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:43,765 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:43,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10460b4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:43,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@89ed911{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:43,766 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:43,767 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-785019810-172.17.0.2-1732436999291 (Datanode Uuid 5ec8d436-8ea8-4223-9061-288fdef1d878) service to localhost/127.0.0.1:45383 2024-11-24T08:30:43,767 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:30:43,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:43,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:34653,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:46383 , LocalHost:localPort 30c28c82771d/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T08:30:43,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-785019810-172.17.0.2-1732436999291:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40119,null,null], DatanodeInfoWithStorage[127.0.0.1:34653,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-785019810-172.17.0.2-1732436999291 2024-11-24T08:30:43,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data3/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:43,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34653,null,null]) java.io.IOException: No block pool offer service for bpid=BP-785019810-172.17.0.2-1732436999291 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:43,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40119,null,null]) java.io.IOException: No block pool offer service for bpid=BP-785019810-172.17.0.2-1732436999291 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:43,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34653,null,null], DatanodeInfoWithStorage[127.0.0.1:40119,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-785019810-172.17.0.2-1732436999291:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:34653,null,null], DatanodeInfoWithStorage[127.0.0.1:40119,null,null]] 2024-11-24T08:30:43,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data4/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:43,768 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:43,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b0035e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:43,772 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66046020{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:43,772 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:43,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b4117c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:43,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412902c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:43,773 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:30:43,773 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:43,773 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:43,773 WARN [BP-785019810-172.17.0.2-1732436999291 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-785019810-172.17.0.2-1732436999291 (Datanode Uuid 948161ed-05be-4a7c-a49c-c6650364918f) service to localhost/127.0.0.1:45383 2024-11-24T08:30:43,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data9/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:43,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/cluster_9e452959-c278-03a0-7eee-59c8b39ee58b/data/data10/current/BP-785019810-172.17.0.2-1732436999291 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:43,774 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:43,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6469263a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:30:43,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2324be38{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:43,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:43,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c6abea1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:43,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d8a9c69{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:43,789 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:30:43,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:30:43,826 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 81) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f4ddcbf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:45383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45383 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45383 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.2@localhost:45383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f4ddcbf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 407) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=191 (was 169) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6810 (was 6678) - AvailableMemoryMB LEAK? - 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=191, ProcessCount=11, AvailableMemoryMB=6810 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.log.dir so I do NOT create it in target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9b17f034-a060-4041-392a-93fbc33540b3/hadoop.tmp.dir so I do NOT create it in target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b, deleteOnExit=true 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/test.cache.data in system properties and HBase conf 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:30:43,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/mapreduce.cluster.local.dir in system 
properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:30:43,835 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:30:43,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:30:43,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:30:43,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:30:43,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:30:43,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:30:43,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:30:43,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:30:43,842 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:30:43,849 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:30:43,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:43,935 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:43,939 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:43,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:43,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:43,940 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:30:43,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:43,941 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e84569b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:43,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@302bad64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:44,058 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e97eedf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-36331-hadoop-hdfs-3_4_1-tests_jar-_-any-2663666037838943292/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:30:44,059 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f4376e6{HTTP/1.1, (http/1.1)}{localhost:36331} 2024-11-24T08:30:44,059 INFO [Time-limited test {}] server.Server(415): Started @150182ms 2024-11-24T08:30:44,073 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
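[Sketch, not part of the captured log] The entries above record HBaseTestingUtil bringing up a fresh minicluster for regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}: the per-test directories are pushed into system properties, then DFS and its web endpoints are started. A minimal Java sketch of the kind of harness call that produces this startup sequence, assuming HBase's public test utility API; the option values are copied from the log, while the class name, imports and body are illustrative only:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the StartMiniClusterOption printed by HBaseTestingUtil(805) above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts mini DFS, ZooKeeper, the HMaster and the region server
    try {
      // ... exercise the cluster, e.g. write to a table and roll the WAL ...
    } finally {
      util.shutdownMiniCluster();    // tear everything down before the resource checks run
    }
  }
}

The "Thread LEAK?" / "OpenFileDescriptor LEAK?" markers earlier in the log are ResourceChecker comparing counts taken before and after a test (the "(was ...)" values) and dumping stacks of threads still alive at teardown.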
2024-11-24T08:30:44,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:44,149 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:44,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:44,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:44,152 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:30:44,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624b3986{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:44,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@179d1ca6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:44,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@232fa1ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-43209-hadoop-hdfs-3_4_1-tests_jar-_-any-5170913829200151499/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:44,268 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6699fa8b{HTTP/1.1, (http/1.1)}{localhost:43209} 2024-11-24T08:30:44,268 INFO [Time-limited test {}] server.Server(415): Started @150391ms 2024-11-24T08:30:44,270 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:30:44,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:44,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:44,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:44,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:44,303 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:30:44,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9982f0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:44,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a7cb65f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:44,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:30:44,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:44,365 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data1/current/BP-1594007340-172.17.0.2-1732437043875/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:44,365 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data2/current/BP-1594007340-172.17.0.2-1732437043875/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:44,382 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:30:44,384 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0a95d08e562ba12 with lease ID 0x4f01eb904be375f8: Processing first storage report for DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d from datanode DatanodeRegistration(127.0.0.1:44343, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=44961, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875) 2024-11-24T08:30:44,385 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0a95d08e562ba12 with lease ID 0x4f01eb904be375f8: from storage DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d node DatanodeRegistration(127.0.0.1:44343, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=44961, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:30:44,385 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0a95d08e562ba12 with lease ID 0x4f01eb904be375f8: Processing first storage report for DS-d3b9c760-7749-4afe-b9bf-096b3aac93fa from datanode DatanodeRegistration(127.0.0.1:44343, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=44961, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875) 2024-11-24T08:30:44,385 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0a95d08e562ba12 with lease ID 0x4f01eb904be375f8: from storage DS-d3b9c760-7749-4afe-b9bf-096b3aac93fa node DatanodeRegistration(127.0.0.1:44343, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=44961, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:44,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54c8c85b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-38267-hadoop-hdfs-3_4_1-tests_jar-_-any-8683399449182350073/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:44,421 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a48749e{HTTP/1.1, (http/1.1)}{localhost:38267} 2024-11-24T08:30:44,421 INFO [Time-limited test {}] server.Server(415): Started @150544ms 2024-11-24T08:30:44,422 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
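[Sketch, not part of the captured log] The two RecoverLeaseFSUtils warnings above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from the Close-WAL-Writer-0 thread retrying WAL lease recovery against a DFSClient that has already been shut down. Roughly, the recovery path asks the NameNode to reassign the old writer's lease and then polls until the file reports closed; a simplified Java sketch using only the public DistributedFileSystem API (the real utility invokes isFileClosed reflectively, which is why the failure surfaces wrapped in an InvocationTargetException, and it also backs off and enforces a timeout):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Simplified illustration of the recoverLease/isFileClosed polling performed by
  // RecoverLeaseFSUtils.recoverDFSFileLease; not the utility's actual code.
  static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean recovered = dfs.recoverLease(wal);      // ask the NameNode to take over the old writer's lease
    while (!recovered && !dfs.isFileClosed(wal)) {  // isFileClosed throws "Filesystem closed" once the DFSClient is shut down
      Thread.sleep(1000);                           // the real utility sleeps between attempts and gives up after a deadline
      recovered = dfs.recoverLease(wal);
    }
  }
}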
2024-11-24T08:30:44,519 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data3/current/BP-1594007340-172.17.0.2-1732437043875/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:44,519 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data4/current/BP-1594007340-172.17.0.2-1732437043875/current, will proceed with Du for space computation calculation, 2024-11-24T08:30:44,535 WARN [Thread-1203 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:30:44,537 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb2403cb144f8f146 with lease ID 0x4f01eb904be375f9: Processing first storage report for DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0 from datanode DatanodeRegistration(127.0.0.1:33277, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=43143, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875) 2024-11-24T08:30:44,537 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2403cb144f8f146 with lease ID 0x4f01eb904be375f9: from storage DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0 node DatanodeRegistration(127.0.0.1:33277, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=43143, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:44,537 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb2403cb144f8f146 with lease ID 0x4f01eb904be375f9: Processing first storage report for DS-7fe12906-fdec-4745-98c3-ddcbc5cd828f from datanode DatanodeRegistration(127.0.0.1:33277, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=43143, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875) 2024-11-24T08:30:44,537 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2403cb144f8f146 with lease ID 0x4f01eb904be375f9: from storage DS-7fe12906-fdec-4745-98c3-ddcbc5cd828f node DatanodeRegistration(127.0.0.1:33277, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=43143, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:44,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce 2024-11-24T08:30:44,548 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/zookeeper_0, clientPort=51402, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:30:44,549 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51402 2024-11-24T08:30:44,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:30:44,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:30:44,560 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47 with version=8 2024-11-24T08:30:44,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:30:44,562 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:30:44,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:44,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:44,562 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:30:44,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:44,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:30:44,562 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:30:44,563 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:30:44,563 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42583 2024-11-24T08:30:44,564 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42583 connecting to ZooKeeper ensemble=127.0.0.1:51402 2024-11-24T08:30:44,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:425830x0, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:30:44,571 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42583-0x101491957d30000 connected 2024-11-24T08:30:44,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:44,589 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47, hbase.cluster.distributed=false 2024-11-24T08:30:44,590 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:30:44,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42583 2024-11-24T08:30:44,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42583 2024-11-24T08:30:44,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42583 2024-11-24T08:30:44,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42583 2024-11-24T08:30:44,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42583 2024-11-24T08:30:44,607 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:30:44,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:44,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:44,608 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:30:44,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:30:44,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:30:44,608 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:30:44,608 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:30:44,608 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33921 2024-11-24T08:30:44,609 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33921 connecting to ZooKeeper ensemble=127.0.0.1:51402 2024-11-24T08:30:44,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339210x0, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:30:44,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339210x0, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:30:44,616 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33921-0x101491957d30001 connected 2024-11-24T08:30:44,616 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:30:44,617 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:30:44,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:30:44,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:30:44,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33921 2024-11-24T08:30:44,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33921 2024-11-24T08:30:44,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33921 2024-11-24T08:30:44,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33921 2024-11-24T08:30:44,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33921 2024-11-24T08:30:44,632 
DEBUG [M:0;30c28c82771d:42583 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:42583 2024-11-24T08:30:44,632 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:44,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:44,634 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:30:44,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,637 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:30:44,638 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,42583,1732437044562 from backup master directory 2024-11-24T08:30:44,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:44,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:30:44,639 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
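The ZKUtil(113) entries above show watchers being set on znodes that do not yet exist (/hbase/running, /hbase/master, /hbase/acl), so each process is notified the moment another process creates them. A minimal sketch of that pattern with the plain Apache ZooKeeper client; the connect string, session timeout and the event handling are assumptions for illustration, not values taken from this run:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchMissingZNode {
  public static void main(String[] args) throws Exception {
    // Assumed quorum address; a real HBase test would use the minicluster's ZK port.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {});

    Watcher runningWatcher = (WatchedEvent event) ->
        System.out.println("event on " + event.getPath() + ": " + event.getType());

    // exists() arms the watch whether or not the node is present (it returns null
    // when the znode is absent), so a later create of /hbase/running fires a
    // NodeCreated event -- the "Set watcher on znode that does not yet exist"
    // behaviour logged by ZKUtil above.
    if (zk.exists("/hbase/running", runningWatcher) == null) {
      System.out.println("/hbase/running not there yet; watch armed");
    }
  }
}

The later ZKWatcher(609) lines reporting type=NodeCreated for /hbase/running are that watch firing once the active master declares the cluster up.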
2024-11-24T08:30:44,639 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,643 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/hbase.id] with ID: 907bd52b-bdac-41ac-a11b-e189056b0726 2024-11-24T08:30:44,643 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/.tmp/hbase.id 2024-11-24T08:30:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:30:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:30:44,651 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/.tmp/hbase.id]:[hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/hbase.id] 2024-11-24T08:30:44,661 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:44,662 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:30:44,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
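The FSUtils(620/625/634) entries record the cluster ID being written to a temporary file under .tmp and then moved onto hbase.id, so readers never observe a half-written file. A rough equivalent of that write-then-rename pattern with the Hadoop FileSystem API; the NameNode address, paths and the plain-text UUID payload are simplifications for illustration, not what FSUtils actually writes:

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteClusterId {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // assumed NameNode address
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/hbase/.tmp/hbase.id");
    Path dst = new Path("/hbase/hbase.id");

    // Write the ID into the temporary location first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF(UUID.randomUUID().toString());
    }
    // ...then publish it by renaming into place, so the target appears all at once.
    if (!fs.rename(tmp, dst)) {
      throw new IllegalStateException("rename of " + tmp + " to " + dst + " failed");
    }
  }
}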
2024-11-24T08:30:44,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:30:44,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:30:44,671 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:30:44,672 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:30:44,672 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:44,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:30:44,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:30:44,681 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store 2024-11-24T08:30:44,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:30:44,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:30:44,687 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:44,687 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:30:44,687 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:44,687 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:44,687 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:30:44,687 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:30:44,687 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
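The MasterRegion(370) and HRegion(7590) entries above dump the full descriptor of the local 'master:store' table, with per-family settings such as VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE. For orientation, this is roughly how such a descriptor is assembled with the public client-side builder API; the table name is made up and only two of the four families are sketched:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class BuildDescriptor {
  public static TableDescriptor build() {
    // 'info'-like family: 3 versions, in-memory, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc'-like family: single version, ROW bloom, default encoding, 64 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
  }
}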
2024-11-24T08:30:44,687 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437044687Disabling compacts and flushes for region at 1732437044687Disabling writes for close at 1732437044687Writing region close event to WAL at 1732437044687Closed at 1732437044687 2024-11-24T08:30:44,688 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/.initializing 2024-11-24T08:30:44,688 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,691 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C42583%2C1732437044562, suffix=, logDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562, archiveDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/oldWALs, maxLogs=10 2024-11-24T08:30:44,691 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C42583%2C1732437044562.1732437044691 2024-11-24T08:30:44,695 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 2024-11-24T08:30:44,696 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43143:43143),(127.0.0.1/127.0.0.1:44961:44961)] 2024-11-24T08:30:44,698 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:30:44,698 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:44,698 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,698 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:30:44,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:44,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:30:44,702 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:44,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:30:44,704 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:44,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:30:44,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:30:44,706 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,706 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,707 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,708 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,708 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,708 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:30:44,710 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:30:44,711 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:30:44,712 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831030, jitterRate=0.05671016871929169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:30:44,713 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732437044698Initializing all the Stores at 1732437044699 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437044699Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437044699Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437044699Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437044699Cleaning up temporary data from old regions at 1732437044708 (+9 ms)Region opened successfully at 1732437044713 (+5 ms) 2024-11-24T08:30:44,713 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:30:44,716 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7732f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:30:44,717 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:30:44,717 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:30:44,717 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:30:44,717 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:30:44,718 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:30:44,718 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:30:44,718 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:30:44,720 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:30:44,721 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:30:44,722 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:30:44,722 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:30:44,723 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:30:44,725 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:30:44,725 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:30:44,726 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:30:44,727 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:30:44,729 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:30:44,730 DEBUG 
[master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:30:44,732 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:30:44,733 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:30:44,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:44,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:30:44,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,737 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,42583,1732437044562, sessionid=0x101491957d30000, setting cluster-up flag (Was=false) 2024-11-24T08:30:44,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,750 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:30:44,751 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:44,760 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:30:44,761 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,42583,1732437044562 2024-11-24T08:30:44,762 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:30:44,764 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:44,764 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:30:44,764 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:30:44,765 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,42583,1732437044562 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:30:44,766 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:30:44,767 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732437074767 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:30:44,768 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:30:44,768 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:30:44,768 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
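The ChoreService(168) entries register periodic maintenance tasks (LogsCleaner above, HFileCleaner and others below) as ScheduledChore instances identified by a name and a period in milliseconds. A bare-bones chore of the same shape, assuming the ScheduledChore/ChoreService classes from hbase-common; the name, period and body are invented for illustration:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class DemoChore {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // chore() runs every 10 seconds until the stopper is stopped.
    ScheduledChore chore = new ScheduledChore("DemoCleaner", stopper, 10_000) {
      @Override protected void chore() {
        System.out.println("periodic cleanup pass");
      }
    };

    ChoreService service = new ChoreService("demo");
    service.scheduleChore(chore);
  }
}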
2024-11-24T08:30:44,769 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,769 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:30:44,772 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:30:44,772 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:30:44,772 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:30:44,773 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:30:44,773 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:30:44,773 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437044773,5,FailOnTimeoutGroup] 2024-11-24T08:30:44,773 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437044773,5,FailOnTimeoutGroup] 2024-11-24T08:30:44,773 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,773 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:30:44,773 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,773 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:30:44,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:30:44,781 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:30:44,782 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47 2024-11-24T08:30:44,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:30:44,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:30:44,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:44,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:30:44,793 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:30:44,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:44,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:30:44,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:30:44,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:44,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:30:44,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:30:44,798 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:44,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:30:44,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:30:44,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:44,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:44,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:30:44,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740 2024-11-24T08:30:44,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740 2024-11-24T08:30:44,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:30:44,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:30:44,802 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:30:44,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:30:44,805 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:30:44,806 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843717, jitterRate=0.07284201681613922}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:30:44,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732437044791Initializing all the Stores at 1732437044792 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437044792Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437044792Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437044792Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437044792Cleaning up temporary data from old regions at 1732437044802 (+10 ms)Region opened successfully at 1732437044806 (+4 ms) 2024-11-24T08:30:44,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:30:44,807 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:30:44,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:30:44,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:30:44,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:30:44,807 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:30:44,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437044807Disabling compacts and flushes for region at 
1732437044807Disabling writes for close at 1732437044807Writing region close event to WAL at 1732437044807Closed at 1732437044807 2024-11-24T08:30:44,809 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:44,809 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:30:44,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:30:44,810 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:30:44,811 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:30:44,821 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(746): ClusterId : 907bd52b-bdac-41ac-a11b-e189056b0726 2024-11-24T08:30:44,821 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:30:44,824 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:30:44,824 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:30:44,826 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:30:44,827 DEBUG [RS:0;30c28c82771d:33921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ed90965, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:30:44,839 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:33921 2024-11-24T08:30:44,839 INFO [RS:0;30c28c82771d:33921 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:30:44,839 INFO [RS:0;30c28c82771d:33921 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:30:44,839 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(832): About to register with Master. 
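Everything in this stretch of the log, from the embedded ZooKeeper on 51402 and the HDFS test-data paths to the master on 42583 and the region server on 33921 about to register with it, is the startup of an in-process test cluster. In HBase tests that is normally driven by the testing utility along these lines; the class and method names are the 2.x HBaseTestingUtility API and are an assumption here, since this 3.0.0-beta build may use the renamed HBaseTestingUtil instead:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class MiniClusterExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    try {
      // Starts mini DFS, mini ZooKeeper, one HMaster and one HRegionServer in this JVM,
      // producing the kind of startup log shown here.
      util.startMiniCluster(1);
      Connection conn = util.getConnection();
      try (Admin admin = conn.getAdmin()) {
        System.out.println("tables visible after startup: " + admin.listTableNames().length);
      }
    } finally {
      util.shutdownMiniCluster();
    }
  }
}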
2024-11-24T08:30:44,840 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,42583,1732437044562 with port=33921, startcode=1732437044607 2024-11-24T08:30:44,840 DEBUG [RS:0;30c28c82771d:33921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:30:44,842 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39695, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:30:44,842 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42583 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,33921,1732437044607 2024-11-24T08:30:44,843 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42583 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,33921,1732437044607 2024-11-24T08:30:44,844 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47 2024-11-24T08:30:44,844 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40387 2024-11-24T08:30:44,844 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:30:44,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:30:44,847 DEBUG [RS:0;30c28c82771d:33921 {}] zookeeper.ZKUtil(111): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,33921,1732437044607 2024-11-24T08:30:44,847 WARN [RS:0;30c28c82771d:33921 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:30:44,847 INFO [RS:0;30c28c82771d:33921 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:44,847 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607 2024-11-24T08:30:44,847 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,33921,1732437044607] 2024-11-24T08:30:44,850 INFO [RS:0;30c28c82771d:33921 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:30:44,852 INFO [RS:0;30c28c82771d:33921 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:30:44,852 INFO [RS:0;30c28c82771d:33921 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:30:44,852 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:44,852 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:30:44,853 INFO [RS:0;30c28c82771d:33921 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:30:44,853 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,853 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,854 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,854 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,854 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:30:44,854 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:30:44,854 DEBUG [RS:0;30c28c82771d:33921 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:30:44,854 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:30:44,854 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,854 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,854 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,854 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,854 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,33921,1732437044607-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:30:44,878 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:30:44,878 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,33921,1732437044607-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,879 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,879 INFO [RS:0;30c28c82771d:33921 {}] regionserver.Replication(171): 30c28c82771d,33921,1732437044607 started 2024-11-24T08:30:44,893 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:44,893 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,33921,1732437044607, RpcServer on 30c28c82771d/172.17.0.2:33921, sessionid=0x101491957d30001 2024-11-24T08:30:44,893 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:30:44,893 DEBUG [RS:0;30c28c82771d:33921 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,33921,1732437044607 2024-11-24T08:30:44,893 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,33921,1732437044607' 2024-11-24T08:30:44,893 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:30:44,894 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:30:44,894 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:30:44,894 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:30:44,894 DEBUG [RS:0;30c28c82771d:33921 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,33921,1732437044607 2024-11-24T08:30:44,894 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,33921,1732437044607' 2024-11-24T08:30:44,894 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:30:44,895 DEBUG 
[RS:0;30c28c82771d:33921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:30:44,895 DEBUG [RS:0;30c28c82771d:33921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:30:44,895 INFO [RS:0;30c28c82771d:33921 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:30:44,895 INFO [RS:0;30c28c82771d:33921 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:30:44,962 WARN [30c28c82771d:42583 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:30:44,997 INFO [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C33921%2C1732437044607, suffix=, logDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607, archiveDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs, maxLogs=32 2024-11-24T08:30:44,998 INFO [RS:0;30c28c82771d:33921 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:30:45,004 INFO [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:30:45,005 DEBUG [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44961:44961),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-24T08:30:45,212 DEBUG [30c28c82771d:42583 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:30:45,213 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,33921,1732437044607 2024-11-24T08:30:45,214 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,33921,1732437044607, state=OPENING 2024-11-24T08:30:45,215 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:30:45,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:45,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:30:45,217 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:30:45,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:45,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:45,217 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,33921,1732437044607}] 2024-11-24T08:30:45,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:45,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:45,371 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:30:45,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42733, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:30:45,376 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:30:45,376 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:30:45,378 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C33921%2C1732437044607.meta, suffix=.meta, logDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607, archiveDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs, maxLogs=32 2024-11-24T08:30:45,378 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta 2024-11-24T08:30:45,383 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta 2024-11-24T08:30:45,384 DEBUG 
[RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44961:44961),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-24T08:30:45,385 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:30:45,385 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:30:45,385 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:30:45,385 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T08:30:45,385 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:30:45,385 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:45,386 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:30:45,386 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:30:45,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:30:45,388 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:30:45,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:45,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:45,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:30:45,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:30:45,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:45,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:45,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:30:45,390 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:30:45,390 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:45,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:45,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:30:45,391 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:30:45,391 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:45,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:30:45,392 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:30:45,392 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740 2024-11-24T08:30:45,394 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740 2024-11-24T08:30:45,395 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:30:45,395 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:30:45,395 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T08:30:45,397 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:30:45,397 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881683, jitterRate=0.1211184561252594}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:30:45,397 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:30:45,398 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732437045386Writing region info on filesystem at 1732437045386Initializing all the Stores at 1732437045386Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437045386Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437045387 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437045387Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437045387Cleaning up temporary data from old regions at 1732437045395 (+8 ms)Running coprocessor post-open hooks at 1732437045397 (+2 ms)Region opened successfully at 1732437045398 (+1 ms) 2024-11-24T08:30:45,399 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732437045370 2024-11-24T08:30:45,401 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:30:45,401 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:30:45,402 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=30c28c82771d,33921,1732437044607 2024-11-24T08:30:45,403 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,33921,1732437044607, state=OPEN 2024-11-24T08:30:45,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:30:45,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:30:45,409 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,33921,1732437044607 2024-11-24T08:30:45,409 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:45,409 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:30:45,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:30:45,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,33921,1732437044607 in 192 msec 2024-11-24T08:30:45,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:30:45,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-24T08:30:45,416 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:30:45,416 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:30:45,417 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:30:45,417 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,33921,1732437044607, seqNum=-1] 2024-11-24T08:30:45,417 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:30:45,419 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59399, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:30:45,424 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-24T08:30:45,425 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732437045425, completionTime=-1 2024-11-24T08:30:45,425 INFO 
[master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:30:45,425 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:30:45,426 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:30:45,426 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732437105426 2024-11-24T08:30:45,426 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437165426 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,42583,1732437044562-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,42583,1732437044562-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,42583,1732437044562-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:42583, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,429 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.792sec 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,42583,1732437044562-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:30:45,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,42583,1732437044562-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:30:45,434 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:30:45,434 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:30:45,434 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,42583,1732437044562-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:30:45,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12f241e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:30:45,522 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,42583,-1 for getting cluster id 2024-11-24T08:30:45,522 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:30:45,524 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '907bd52b-bdac-41ac-a11b-e189056b0726' 2024-11-24T08:30:45,524 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:30:45,524 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "907bd52b-bdac-41ac-a11b-e189056b0726" 2024-11-24T08:30:45,525 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66549224, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:30:45,525 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,42583,-1] 2024-11-24T08:30:45,525 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:30:45,525 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:30:45,526 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33002, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:30:45,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e01093b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:30:45,527 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:30:45,528 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,33921,1732437044607, seqNum=-1] 2024-11-24T08:30:45,529 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:30:45,531 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47150, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:30:45,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30c28c82771d,42583,1732437044562 2024-11-24T08:30:45,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:30:45,535 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:30:45,535 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-24T08:30:45,535 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-24T08:30:45,535 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:30:45,536 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 30c28c82771d,42583,1732437044562 2024-11-24T08:30:45,536 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1954c949 2024-11-24T08:30:45,536 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:30:45,538 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33014, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:30:45,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:30:45,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T08:30:45,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:30:45,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:30:45,541 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:30:45,541 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:30:45,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-24T08:30:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:30:45,543 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:30:45,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741835_1011 (size=395) 2024-11-24T08:30:45,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741835_1011 (size=395) 2024-11-24T08:30:45,552 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 90aebe4192f3f9004eb2e133a8c9b599, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47 2024-11-24T08:30:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741836_1012 (size=78) 2024-11-24T08:30:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741836_1012 (size=78) 2024-11-24T08:30:45,559 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:30:45,559 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 90aebe4192f3f9004eb2e133a8c9b599, disabling compactions & flushes 2024-11-24T08:30:45,559 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:30:45,559 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:30:45,559 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. after waiting 0 ms 2024-11-24T08:30:45,559 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:30:45,559 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:30:45,559 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 90aebe4192f3f9004eb2e133a8c9b599: Waiting for close lock at 1732437045559Disabling compacts and flushes for region at 1732437045559Disabling writes for close at 1732437045559Writing region close event to WAL at 1732437045559Closed at 1732437045559 2024-11-24T08:30:45,560 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:30:45,561 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732437045560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732437045560"}]},"ts":"1732437045560"} 2024-11-24T08:30:45,563 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T08:30:45,564 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:30:45,565 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437045564"}]},"ts":"1732437045564"} 2024-11-24T08:30:45,566 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-24T08:30:45,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=90aebe4192f3f9004eb2e133a8c9b599, ASSIGN}] 2024-11-24T08:30:45,568 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=90aebe4192f3f9004eb2e133a8c9b599, ASSIGN 2024-11-24T08:30:45,569 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=90aebe4192f3f9004eb2e133a8c9b599, ASSIGN; state=OFFLINE, location=30c28c82771d,33921,1732437044607; forceNewPlan=false, retain=false 2024-11-24T08:30:45,720 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=90aebe4192f3f9004eb2e133a8c9b599, regionState=OPENING, regionLocation=30c28c82771d,33921,1732437044607 2024-11-24T08:30:45,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=90aebe4192f3f9004eb2e133a8c9b599, ASSIGN because future has completed 2024-11-24T08:30:45,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90aebe4192f3f9004eb2e133a8c9b599, server=30c28c82771d,33921,1732437044607}] 2024-11-24T08:30:45,880 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 
2024-11-24T08:30:45,880 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 90aebe4192f3f9004eb2e133a8c9b599, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.', STARTKEY => '', ENDKEY => ''}
2024-11-24T08:30:45,880 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,881 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-24T08:30:45,881 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,881 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,882 INFO [StoreOpener-90aebe4192f3f9004eb2e133a8c9b599-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,883 INFO [StoreOpener-90aebe4192f3f9004eb2e133a8c9b599-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90aebe4192f3f9004eb2e133a8c9b599 columnFamilyName info
2024-11-24T08:30:45,883 DEBUG [StoreOpener-90aebe4192f3f9004eb2e133a8c9b599-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-24T08:30:45,884 INFO [StoreOpener-90aebe4192f3f9004eb2e133a8c9b599-1 {}] regionserver.HStore(327): Store=90aebe4192f3f9004eb2e133a8c9b599/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-24T08:30:45,884 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,885 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,885 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,885 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,885 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,887 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,889 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-24T08:30:45,889 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 90aebe4192f3f9004eb2e133a8c9b599; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722122, jitterRate=-0.08177453279495239}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-24T08:30:45,890 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:30:45,890 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 90aebe4192f3f9004eb2e133a8c9b599: Running coprocessor pre-open hook at 1732437045881Writing region info on filesystem at 1732437045881Initializing all the Stores at 1732437045882 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437045882Cleaning up temporary data from old regions at 1732437045885 (+3 ms)Running coprocessor post-open hooks at 1732437045890 (+5 ms)Region opened successfully at 1732437045890
2024-11-24T08:30:45,891 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599., pid=6, masterSystemTime=1732437045876
2024-11-24T08:30:45,893 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for
TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:30:45,893 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:30:45,894 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=90aebe4192f3f9004eb2e133a8c9b599, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,33921,1732437044607 2024-11-24T08:30:45,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90aebe4192f3f9004eb2e133a8c9b599, server=30c28c82771d,33921,1732437044607 because future has completed 2024-11-24T08:30:45,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:30:45,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 90aebe4192f3f9004eb2e133a8c9b599, server=30c28c82771d,33921,1732437044607 in 175 msec 2024-11-24T08:30:45,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:30:45,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=90aebe4192f3f9004eb2e133a8c9b599, ASSIGN in 334 msec 2024-11-24T08:30:45,904 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:30:45,904 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437045904"}]},"ts":"1732437045904"} 2024-11-24T08:30:45,906 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-24T08:30:45,907 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:30:45,909 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 368 msec 2024-11-24T08:30:46,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:46,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:47,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:47,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:48,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:48,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:49,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:49,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:49,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:30:49,522 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T08:30:49,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:30:49,523 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-24T08:30:49,524 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:30:49,524 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T08:30:50,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:50,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:50,888 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:30:50,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:30:50,924 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:30:50,924 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-24T08:30:51,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:51,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:52,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:52,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:53,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:53,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:54,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:54,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:55,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:55,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-24T08:30:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-24T08:30:55,606 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-24T08:30:55,606 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-24T08:30:55,610 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-24T08:30:55,610 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.
2024-11-24T08:30:55,613 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599., hostname=30c28c82771d,33921,1732437044607, seqNum=2]
2024-11-24T08:30:56,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:56,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:57,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:57,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:57,616 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:30:57,617 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:57,617 WARN [DataStreamer for file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 block BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK], DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]) is bad. 2024-11-24T08:30:57,617 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:57,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-195191428_22 at /127.0.0.1:41550 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41550 dst: /127.0.0.1:33277 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:57,618 WARN [PacketResponder: BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33277] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:57,618 WARN [DataStreamer for file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 block BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK], DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]) is bad. 2024-11-24T08:30:57,617 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:57,618 WARN [DataStreamer for file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta block BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK], DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33277,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]) is bad. 2024-11-24T08:30:57,618 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-195191428_22 at /127.0.0.1:35288 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35288 dst: /127.0.0.1:44343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:57,618 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:35318 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35318 dst: /127.0.0.1:44343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:57,618 WARN [PacketResponder: BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33277] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
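Note on the "Premature EOF from inputStream" and ClosedChannelException errors above: they surface in the receiving datanode's packet reader, which expects a fixed-length packet from the upstream writer whose socket has already been torn down by the datanode restart. Below is a minimal, illustrative Java sketch of that read-fully pattern; the real loop is hadoop-common's IOUtils.readFully, and the class and message text here are assumptions for illustration only.

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

// Illustrative only: a read-fully loop that fails the way the datanode packet
// receiver does when the upstream connection closes mid-packet.
public final class ReadFullySketch {
  private ReadFullySketch() {}

  public static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException {
    int read = 0;
    while (read < len) {
      int n = in.read(buf, off + read, len - read);
      if (n < 0) {
        // Stream ended before the expected number of bytes arrived.
        throw new EOFException("Premature EOF: expected " + len + " bytes, got " + read);
      }
      read += n;
    }
  }
}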
2024-11-24T08:30:57,618 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:41584 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41584 dst: /127.0.0.1:33277 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:57,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:35320 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35320 dst: /127.0.0.1:44343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:57,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:41592 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41592 dst: /127.0.0.1:33277 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:57,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54c8c85b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:57,623 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a48749e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:57,623 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:57,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a7cb65f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:57,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9982f0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:57,624 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:57,624 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594007340-172.17.0.2-1732437043875 (Datanode Uuid acbfbffc-88f0-4344-8543-d929848a9b72) service to localhost/127.0.0.1:40387 2024-11-24T08:30:57,624 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T08:30:57,624 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:57,625 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data3/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:57,625 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data4/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:57,625 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:57,636 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:57,641 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:57,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:57,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:57,642 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:30:57,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ba6dae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:57,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11fd78ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:57,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b408bc7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-43409-hadoop-hdfs-3_4_1-tests_jar-_-any-2193302719147854379/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:57,758 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70d6804b{HTTP/1.1, (http/1.1)}{localhost:43409} 2024-11-24T08:30:57,758 INFO [Time-limited test {}] server.Server(415): Started @163881ms 2024-11-24T08:30:57,759 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:30:57,777 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:57,777 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:57,777 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:57,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-195191428_22 at /127.0.0.1:51250 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51250 dst: /127.0.0.1:44343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T08:30:57,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:51266 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51266 dst: /127.0.0.1:44343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:57,778 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:51264 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51264 dst: /127.0.0.1:44343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:30:57,784 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@232fa1ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:57,785 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6699fa8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:30:57,785 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:30:57,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@179d1ca6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:30:57,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624b3986{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:30:57,786 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:30:57,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:30:57,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:30:57,786 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594007340-172.17.0.2-1732437043875 (Datanode Uuid ab85ff76-27bc-469c-99a2-c05f1573df17) service to localhost/127.0.0.1:40387 2024-11-24T08:30:57,788 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data1/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:57,788 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data2/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:30:57,788 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:30:57,796 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:30:57,799 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:30:57,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:30:57,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:30:57,800 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:30:57,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4582f0a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:30:57,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c7c7c1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:30:57,860 WARN [Thread-1338 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:30:57,862 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf78791cd0c8ce127 with lease ID 0x4f01eb904be375fa: from storage DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0 node DatanodeRegistration(127.0.0.1:35819, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=40689, infoSecurePort=0, ipcPort=36529, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:57,862 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf78791cd0c8ce127 with lease ID 0x4f01eb904be375fa: from storage DS-7fe12906-fdec-4745-98c3-ddcbc5cd828f node DatanodeRegistration(127.0.0.1:35819, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=40689, infoSecurePort=0, ipcPort=36529, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:57,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ce7ccc4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-46181-hadoop-hdfs-3_4_1-tests_jar-_-any-1746267062246636988/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:30:57,917 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@167fd01b{HTTP/1.1, (http/1.1)}{localhost:46181} 2024-11-24T08:30:57,917 INFO [Time-limited test {}] server.Server(415): Started @164040ms 2024-11-24T08:30:57,918 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:30:58,020 WARN [Thread-1369 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:30:58,023 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94a94786417147e7 with lease ID 0x4f01eb904be375fb: from storage DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d node DatanodeRegistration(127.0.0.1:34329, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=42485, infoSecurePort=0, ipcPort=42611, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:58,023 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94a94786417147e7 with lease ID 0x4f01eb904be375fb: from storage DS-d3b9c760-7749-4afe-b9bf-096b3aac93fa node DatanodeRegistration(127.0.0.1:34329, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=42485, infoSecurePort=0, ipcPort=42611, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:30:58,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:30:58,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:58,938 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-24T08:30:58,940 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-24T08:30:58,941 ERROR [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
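Note on the recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings, the last of which ends just above: they come from a reflective isFileClosed() probe made against a DFSClient that has already been shut down for the old mini-cluster, so the reflected call wraps the IOException in an InvocationTargetException. The sketch below is a hedged illustration of such a probe, not the actual RecoverLeaseFSUtils code; the helper name is hypothetical.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: look up isFileClosed(Path) by reflection (it only exists on
// DistributedFileSystem) and unwrap InvocationTargetException instead of treating
// the failure as "file is closed".
public final class IsFileClosedProbe {
  private IsFileClosedProbe() {}

  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // filesystem implementation does not expose the probe
    } catch (InvocationTargetException e) {
      // The interesting part is the cause, e.g. java.io.IOException: Filesystem closed
      // when the underlying DFSClient has already been shut down.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}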
2024-11-24T08:30:58,942 WARN [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:58,942 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C33921%2C1732437044607:(num 1732437044998) roll requested 2024-11-24T08:30:58,942 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:30:58,948 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 newFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:30:58,948 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:58,948 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:58,948 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:58,948 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:58,948 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:30:58,949 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:30:58,949 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
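Note on the "Recover lease on dfs file ..." / "Failed to recover lease, attempt=0 ... after 1ms" messages that follow, together with the NameNode's "Lease recovery is in progress. RecoveryId = 1017": they are the visible side of a recoverLease() polling loop on the old WAL file after the writer's close failed. The sketch below is a minimal illustration under that assumption; the class name, timeout, and pause parameters are hypothetical, and only DistributedFileSystem.recoverLease(Path) is taken from the traces above.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative polling loop: recoverLease() returns false while the NameNode still
// reports lease recovery in progress, so the caller re-polls with a pause until the
// lease is released or the deadline passes.
public final class LeaseRecoveryPoll {
  private LeaseRecoveryPoll() {}

  public static boolean recover(DistributedFileSystem dfs, Path walFile,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(walFile)) {
        return true; // lease released; the file can now be closed and read safely
      }
      System.out.println("Failed to recover lease, attempt=" + attempt++ + " on file=" + walFile);
      Thread.sleep(pauseMs);
    }
    return false;
  }
}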
2024-11-24T08:30:58,949 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:30:58,949 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:30:58,949 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42485:42485),(127.0.0.1/127.0.0.1:40689:40689)] 2024-11-24T08:30:58,949 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 is not closed yet, will try archiving it next time 2024-11-24T08:30:58,950 WARN [IPC Server handler 2 on default port 40387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-24T08:30:58,950 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 after 1ms 2024-11-24T08:30:59,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34329 is added to blk_1073741833_1017 (size=1632) 2024-11-24T08:30:59,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:59,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:30:59,864 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T08:31:00,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:00,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:00,982 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-24T08:31:01,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:01,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:02,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:02,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:02,951 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 after 4002ms 2024-11-24T08:31:02,986 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:02,986 WARN [DataStreamer for file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 block BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34329,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK], DatanodeInfoWithStorage[127.0.0.1:35819,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34329,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]) is bad. 2024-11-24T08:31:02,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:42170 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42170 dst: /127.0.0.1:35819 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:02,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:36272 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36272 dst: /127.0.0.1:34329 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:02,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ce7ccc4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:02,989 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@167fd01b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:31:02,989 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:31:02,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c7c7c1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:31:02,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4582f0a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:31:02,990 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:31:02,990 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:31:02,990 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:31:02,990 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594007340-172.17.0.2-1732437043875 (Datanode Uuid ab85ff76-27bc-469c-99a2-c05f1573df17) service to localhost/127.0.0.1:40387 2024-11-24T08:31:02,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data1/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:02,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data2/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:02,992 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:31:03,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:31:03,004 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:31:03,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:31:03,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:31:03,005 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:31:03,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ae00177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:31:03,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f94f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:31:03,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aaed393{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-43227-hadoop-hdfs-3_4_1-tests_jar-_-any-9709821204283127418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:03,119 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7939cb3e{HTTP/1.1, (http/1.1)}{localhost:43227} 2024-11-24T08:31:03,119 INFO [Time-limited test {}] server.Server(415): Started @169242ms 2024-11-24T08:31:03,121 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:31:03,145 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:03,145 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161304045_22 at /127.0.0.1:43984 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43984 dst: /127.0.0.1:35819 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:03,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b408bc7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:03,150 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70d6804b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:31:03,150 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:31:03,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11fd78ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:31:03,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ba6dae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:31:03,151 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:31:03,151 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:31:03,152 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594007340-172.17.0.2-1732437043875 (Datanode Uuid acbfbffc-88f0-4344-8543-d929848a9b72) service to localhost/127.0.0.1:40387 2024-11-24T08:31:03,152 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:31:03,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data3/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:03,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data4/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:03,153 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:31:03,167 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:31:03,169 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:31:03,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:31:03,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:31:03,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:31:03,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a09209{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:31:03,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64e298ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:31:03,223 WARN [Thread-1412 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:31:03,225 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3da0ac99368f3478 with lease ID 0x4f01eb904be375fc: from storage DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d node DatanodeRegistration(127.0.0.1:39995, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=38385, infoSecurePort=0, ipcPort=44911, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:03,225 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3da0ac99368f3478 with lease ID 0x4f01eb904be375fc: from storage DS-d3b9c760-7749-4afe-b9bf-096b3aac93fa node DatanodeRegistration(127.0.0.1:39995, datanodeUuid=ab85ff76-27bc-469c-99a2-c05f1573df17, infoPort=38385, infoSecurePort=0, ipcPort=44911, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:03,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@787041af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/java.io.tmpdir/jetty-localhost-41665-hadoop-hdfs-3_4_1-tests_jar-_-any-4809515520796098279/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:03,289 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7269a538{HTTP/1.1, (http/1.1)}{localhost:41665} 2024-11-24T08:31:03,289 INFO [Time-limited test {}] server.Server(415): Started @169412ms 2024-11-24T08:31:03,291 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:31:03,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:03,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:03,377 WARN [Thread-1443 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:31:03,379 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4309edea1bcc0035 with lease ID 0x4f01eb904be375fd: from storage DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0 node DatanodeRegistration(127.0.0.1:33811, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=36137, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:03,379 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4309edea1bcc0035 with lease ID 0x4f01eb904be375fd: from storage DS-7fe12906-fdec-4745-98c3-ddcbc5cd828f node DatanodeRegistration(127.0.0.1:33811, datanodeUuid=acbfbffc-88f0-4344-8543-d929848a9b72, infoPort=36137, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1251359478;c=1732437043875), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:04,313 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-24T08:31:04,315 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-24T08:31:04,317 ERROR [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35819,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:04,317 WARN [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35819,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:04,317 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C33921%2C1732437044607:(num 1732437058942) roll requested 2024-11-24T08:31:04,317 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.1732437064317 2024-11-24T08:31:04,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:04,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:04,323 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 newFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 2024-11-24T08:31:04,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:04,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:04,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:04,324 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:04,324 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:04,324 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 2024-11-24T08:31:04,324 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35819,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:04,324 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35819,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:04,324 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:31:04,325 WARN [IPC Server handler 3 on default port 40387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-24T08:31:04,325 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36137:36137),(127.0.0.1/127.0.0.1:38385:38385)] 2024-11-24T08:31:04,325 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 is not closed yet, will try archiving it next time 2024-11-24T08:31:04,325 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 after 1ms 2024-11-24T08:31:05,225 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T08:31:05,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:05,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:06,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:06,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:06,326 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:06,334 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 newFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:06,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:06,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:06,334 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:06,334 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:06,334 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:06,335 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:06,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38385:38385),(127.0.0.1/127.0.0.1:36137:36137)] 2024-11-24T08:31:06,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 is not closed yet, will try archiving it next time 2024-11-24T08:31:06,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 is not closed yet, will try archiving it next time 2024-11-24T08:31:06,336 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:31:06,336 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:31:06,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741838_1019 (size=1264) 2024-11-24T08:31:06,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741838_1019 (size=1264) 2024-11-24T08:31:06,337 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 after 1ms 2024-11-24T08:31:06,337 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:31:06,337 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 is not closed yet, will try archiving it next time 2024-11-24T08:31:06,346 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732437045890/Put/vlen=218/seqid=0] 2024-11-24T08:31:06,346 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732437055614/Put/vlen=1045/seqid=0] 2024-11-24T08:31:06,346 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437044998 2024-11-24T08:31:06,346 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:31:06,346 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:31:06,347 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 after 1ms 2024-11-24T08:31:06,347 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:31:06,350 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732437058941/Put/vlen=1045/seqid=0] 2024-11-24T08:31:06,350 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732437060984/Put/vlen=1045/seqid=0] 2024-11-24T08:31:06,350 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 2024-11-24T08:31:06,350 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 2024-11-24T08:31:06,350 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 2024-11-24T08:31:06,351 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 after 1ms 2024-11-24T08:31:06,351 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437064317 2024-11-24T08:31:06,354 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732437064316/Put/vlen=1045/seqid=0] 2024-11-24T08:31:06,354 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:06,354 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:06,354 WARN [IPC Server handler 2 on default port 40387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-24T08:31:06,354 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 after 0ms 2024-11-24T08:31:07,226 WARN [ResponseProcessor for block BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:07,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-195191428_22 at /127.0.0.1:52622 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39995:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52622 dst: /127.0.0.1:39995 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39995 remote=/127.0.0.1:52622]. Total timeout mills is 60000, 59108 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:07,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-195191428_22 at /127.0.0.1:40448 [Receiving block BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40448 dst: /127.0.0.1:33811 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:07,226 WARN [DataStreamer for file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 block BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39995,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK], DatanodeInfoWithStorage[127.0.0.1:33811,DS-6e56c470-34a4-4e42-97ab-b35bd3595cc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39995,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]) is bad. 
2024-11-24T08:31:07,227 WARN [DataStreamer for file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 block BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:07,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741839_1022 (size=85) 2024-11-24T08:31:07,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741839_1022 (size=85) 2024-11-24T08:31:07,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:07,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:08,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:08,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:08,326 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437058942 after 4002ms 2024-11-24T08:31:09,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:09,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:10,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:10,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:10,355 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 after 4001ms 2024-11-24T08:31:10,355 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:10,359 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 2024-11-24T08:31:10,359 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 90aebe4192f3f9004eb2e133a8c9b599 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T08:31:10,360 ERROR [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:10,360 WARN [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:10,360 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C33921%2C1732437044607:(num 1732437066326) roll requested 2024-11-24T08:31:10,360 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.1732437070360 2024-11-24T08:31:10,365 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 newFile=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437070360 2024-11-24T08:31:10,365 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,366 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,366 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,366 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,366 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,366 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437070360 2024-11-24T08:31:10,366 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:10,367 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36137:36137),(127.0.0.1/127.0.0.1:38385:38385)] 2024-11-24T08:31:10,367 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 is not closed yet, will try archiving it next time 2024-11-24T08:31:10,367 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1594007340-172.17.0.2-1732437043875:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:10,367 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326
2024-11-24T08:31:10,367 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 after 0ms
2024-11-24T08:31:10,368 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.1732437066326 to hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs/30c28c82771d%2C33921%2C1732437044607.1732437066326
2024-11-24T08:31:10,387 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/.tmp/info/bb6c165b1740457baa4aade9d0265548 is 1080, key is row1002/info:/1732437055614/Put/seqid=0
2024-11-24T08:31:10,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741841_1024 (size=9270)
2024-11-24T08:31:10,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741841_1024 (size=9270)
2024-11-24T08:31:10,392 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/.tmp/info/bb6c165b1740457baa4aade9d0265548
2024-11-24T08:31:10,399 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/.tmp/info/bb6c165b1740457baa4aade9d0265548 as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/info/bb6c165b1740457baa4aade9d0265548
2024-11-24T08:31:10,404 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/info/bb6c165b1740457baa4aade9d0265548, entries=4, sequenceid=8, filesize=9.1 K
2024-11-24T08:31:10,405 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 90aebe4192f3f9004eb2e133a8c9b599 in 46ms, sequenceid=8, compaction requested=false
2024-11-24T08:31:10,405 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 90aebe4192f3f9004eb2e133a8c9b599:
2024-11-24T08:31:10,406 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB
2024-11-24T08:31:10,406 ERROR
[FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:10,406 WARN [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47-prefix:30c28c82771d,33921,1732437044607.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:10,406 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C33921%2C1732437044607.meta:.meta(num 1732437045378) roll requested 2024-11-24T08:31:10,406 INFO [regionserver/30c28c82771d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C33921%2C1732437044607.meta.1732437070406.meta 2024-11-24T08:31:10,411 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,411 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,411 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437070406.meta 2024-11-24T08:31:10,412 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:10,412 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:10,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta 2024-11-24T08:31:10,412 WARN [IPC Server handler 4 on default port 40387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1014
2024-11-24T08:31:10,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta after 0ms
2024-11-24T08:31:10,416 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38385:38385),(127.0.0.1/127.0.0.1:36137:36137)]
2024-11-24T08:31:10,416 DEBUG [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta is not closed yet, will try archiving it next time
2024-11-24T08:31:10,432 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/info/fadfa4b5f045484fb293155391b428d1 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599./info:regioninfo/1732437045894/Put/seqid=0
2024-11-24T08:31:10,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741843_1027 (size=7125)
2024-11-24T08:31:10,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741843_1027 (size=7125)
2024-11-24T08:31:10,438 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/info/fadfa4b5f045484fb293155391b428d1
2024-11-24T08:31:10,457 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/ns/ff91b64e6cb441a490c4d066df9bce18 is 43, key is default/ns:d/1732437045419/Put/seqid=0
2024-11-24T08:31:10,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741844_1028 (size=5153)
2024-11-24T08:31:10,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741844_1028 (size=5153)
2024-11-24T08:31:10,462 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/ns/ff91b64e6cb441a490c4d066df9bce18
2024-11-24T08:31:10,481 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/table/a02b745e05a34ef58f89de1cd55527f5 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732437045904/Put/seqid=0
2024-11-24T08:31:10,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741845_1029 (size=5438)
2024-11-24T08:31:10,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741845_1029 (size=5438)
2024-11-24T08:31:10,486 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/table/a02b745e05a34ef58f89de1cd55527f5
2024-11-24T08:31:10,491 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/info/fadfa4b5f045484fb293155391b428d1 as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/info/fadfa4b5f045484fb293155391b428d1
2024-11-24T08:31:10,496 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/info/fadfa4b5f045484fb293155391b428d1, entries=10, sequenceid=11, filesize=7.0 K
2024-11-24T08:31:10,497 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/ns/ff91b64e6cb441a490c4d066df9bce18 as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/ns/ff91b64e6cb441a490c4d066df9bce18
2024-11-24T08:31:10,501 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/ns/ff91b64e6cb441a490c4d066df9bce18, entries=2, sequenceid=11, filesize=5.0 K
2024-11-24T08:31:10,502 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/.tmp/table/a02b745e05a34ef58f89de1cd55527f5 as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/table/a02b745e05a34ef58f89de1cd55527f5
2024-11-24T08:31:10,506 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/table/a02b745e05a34ef58f89de1cd55527f5, entries=2, sequenceid=11, filesize=5.3 K
2024-11-24T08:31:10,508 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false
2024-11-24T08:31:10,508 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-24T08:31:10,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-24T08:31:10,513 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-24T08:31:10,513 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:31:10,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:31:10,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:31:10,513 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T08:31:10,513 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:31:10,513 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2017098853, stopped=false 2024-11-24T08:31:10,513 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,42583,1732437044562 2024-11-24T08:31:10,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:31:10,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:31:10,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:10,515 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:31:10,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:10,516 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:31:10,516 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:31:10,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:31:10,516 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,33921,1732437044607' ***** 2024-11-24T08:31:10,516 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:31:10,516 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:31:10,516 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:31:10,516 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:31:10,516 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:31:10,516 INFO [RS:0;30c28c82771d:33921 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(3091): Received CLOSE for 90aebe4192f3f9004eb2e133a8c9b599 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,33921,1732437044607 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:33921. 
2024-11-24T08:31:10,517 DEBUG [RS:0;30c28c82771d:33921 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:31:10,517 DEBUG [RS:0;30c28c82771d:33921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:31:10,517 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 90aebe4192f3f9004eb2e133a8c9b599, disabling compactions & flushes 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:31:10,517 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:31:10,517 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:31:10,517 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. after waiting 0 ms 2024-11-24T08:31:10,517 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:31:10,517 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 
2024-11-24T08:31:10,518 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-24T08:31:10,518 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1325): Online Regions={90aebe4192f3f9004eb2e133a8c9b599=TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599., 1588230740=hbase:meta,,1.1588230740}
2024-11-24T08:31:10,518 DEBUG [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 90aebe4192f3f9004eb2e133a8c9b599
2024-11-24T08:31:10,518 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-24T08:31:10,518 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-24T08:31:10,518 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-24T08:31:10,518 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-24T08:31:10,518 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-24T08:31:10,522 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/default/TestLogRolling-testLogRollOnPipelineRestart/90aebe4192f3f9004eb2e133a8c9b599/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-11-24T08:31:10,523 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-24T08:31:10,523 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599.
2024-11-24T08:31:10,523 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:31:10,523 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 90aebe4192f3f9004eb2e133a8c9b599: Waiting for close lock at 1732437070517Running coprocessor pre-close hooks at 1732437070517Disabling compacts and flushes for region at 1732437070517Disabling writes for close at 1732437070517Writing region close event to WAL at 1732437070518 (+1 ms)Running coprocessor post-close hooks at 1732437070523 (+5 ms)Closed at 1732437070523 2024-11-24T08:31:10,523 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:31:10,523 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437070518Running coprocessor pre-close hooks at 1732437070518Disabling compacts and flushes for region at 1732437070518Disabling writes for close at 1732437070518Writing region close event to WAL at 1732437070519 (+1 ms)Running coprocessor post-close hooks at 1732437070523 (+4 ms)Closed at 1732437070523 2024-11-24T08:31:10,523 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732437045538.90aebe4192f3f9004eb2e133a8c9b599. 2024-11-24T08:31:10,524 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:31:10,718 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,33921,1732437044607; all regions closed. 2024-11-24T08:31:10,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,719 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,719 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:10,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741842_1025 (size=825) 2024-11-24T08:31:10,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741842_1025 (size=825) 2024-11-24T08:31:10,856 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:31:10,856 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:31:10,857 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:31:11,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:11,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:12,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:12,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:13,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:13,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:14,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:14,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:14,379 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-24T08:31:14,413 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta after 4001ms 2024-11-24T08:31:14,414 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/WALs/30c28c82771d,33921,1732437044607/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta to hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs/30c28c82771d%2C33921%2C1732437044607.meta.1732437045378.meta 2024-11-24T08:31:14,416 DEBUG [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs 2024-11-24T08:31:14,416 INFO [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C33921%2C1732437044607.meta:.meta(num 1732437070406) 2024-11-24T08:31:14,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,417 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,417 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741840_1023 (size=1162) 2024-11-24T08:31:14,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741840_1023 (size=1162) 2024-11-24T08:31:14,423 DEBUG [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs 2024-11-24T08:31:14,423 INFO [RS:0;30c28c82771d:33921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C33921%2C1732437044607:(num 1732437070360) 2024-11-24T08:31:14,423 DEBUG [RS:0;30c28c82771d:33921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:31:14,423 INFO [RS:0;30c28c82771d:33921 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:31:14,423 INFO [RS:0;30c28c82771d:33921 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:31:14,423 INFO [RS:0;30c28c82771d:33921 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:31:14,423 INFO [RS:0;30c28c82771d:33921 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:31:14,423 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:31:14,424 INFO [RS:0;30c28c82771d:33921 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33921 2024-11-24T08:31:14,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,33921,1732437044607 2024-11-24T08:31:14,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:31:14,427 INFO [RS:0;30c28c82771d:33921 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:31:14,429 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,33921,1732437044607] 2024-11-24T08:31:14,431 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,33921,1732437044607 already deleted, retry=false 2024-11-24T08:31:14,431 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,33921,1732437044607 expired; onlineServers=0 2024-11-24T08:31:14,431 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,42583,1732437044562' ***** 2024-11-24T08:31:14,431 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:31:14,431 INFO [M:0;30c28c82771d:42583 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:31:14,431 INFO [M:0;30c28c82771d:42583 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:31:14,431 DEBUG [M:0;30c28c82771d:42583 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:31:14,431 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:31:14,431 DEBUG [M:0;30c28c82771d:42583 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:31:14,431 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437044773 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437044773,5,FailOnTimeoutGroup] 2024-11-24T08:31:14,431 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437044773 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437044773,5,FailOnTimeoutGroup] 2024-11-24T08:31:14,431 INFO [M:0;30c28c82771d:42583 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:31:14,431 INFO [M:0;30c28c82771d:42583 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:31:14,431 DEBUG [M:0;30c28c82771d:42583 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:31:14,431 INFO [M:0;30c28c82771d:42583 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:31:14,432 INFO [M:0;30c28c82771d:42583 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:31:14,432 INFO [M:0;30c28c82771d:42583 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:31:14,432 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:31:14,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:31:14,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:14,432 DEBUG [M:0;30c28c82771d:42583 {}] zookeeper.ZKUtil(347): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:31:14,432 WARN [M:0;30c28c82771d:42583 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:31:14,433 INFO [M:0;30c28c82771d:42583 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/.lastflushedseqids 2024-11-24T08:31:14,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741846_1030 (size=130) 2024-11-24T08:31:14,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741846_1030 (size=130) 2024-11-24T08:31:14,440 INFO [M:0;30c28c82771d:42583 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:31:14,440 INFO [M:0;30c28c82771d:42583 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:31:14,440 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:31:14,440 INFO [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:14,440 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:14,440 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:31:14,440 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:14,440 INFO [M:0;30c28c82771d:42583 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-24T08:31:14,441 ERROR [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData-prefix:30c28c82771d,42583,1732437044562 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:14,441 WARN [FSHLog-0-hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData-prefix:30c28c82771d,42583,1732437044562 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:14,441 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 30c28c82771d%2C42583%2C1732437044562:(num 1732437044691) roll requested 2024-11-24T08:31:14,441 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C42583%2C1732437044562.1732437074441 2024-11-24T08:31:14,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,446 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,446 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,446 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,446 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,446 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437074441 2024-11-24T08:31:14,446 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T08:31:14,446 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44343,DS-54179d18-aa4a-4beb-bdd6-def97c7d0a2d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T08:31:14,447 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 2024-11-24T08:31:14,447 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38385:38385),(127.0.0.1/127.0.0.1:36137:36137)] 2024-11-24T08:31:14,447 WARN [IPC Server handler 3 on default port 40387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-24T08:31:14,447 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 is not closed yet, will try archiving it next time 2024-11-24T08:31:14,447 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 after 0ms 2024-11-24T08:31:14,462 DEBUG [M:0;30c28c82771d:42583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b61ed245861484584b9a3e4d92d9cf3 is 82, key is hbase:meta,,1/info:regioninfo/1732437045402/Put/seqid=0 2024-11-24T08:31:14,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741848_1033 (size=5672) 2024-11-24T08:31:14,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741848_1033 (size=5672) 2024-11-24T08:31:14,467 INFO [M:0;30c28c82771d:42583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b61ed245861484584b9a3e4d92d9cf3 2024-11-24T08:31:14,487 DEBUG [M:0;30c28c82771d:42583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b432a1ed737941468164d542c35128ac is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732437045909/Put/seqid=0 2024-11-24T08:31:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741849_1034 (size=6117) 2024-11-24T08:31:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741849_1034 (size=6117) 2024-11-24T08:31:14,493 INFO [M:0;30c28c82771d:42583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b432a1ed737941468164d542c35128ac 2024-11-24T08:31:14,512 DEBUG [M:0;30c28c82771d:42583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ee7ba5814c5493f824993d7113ce32d is 69, key is 30c28c82771d,33921,1732437044607/rs:state/1732437044843/Put/seqid=0 2024-11-24T08:31:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741850_1035 (size=5156) 2024-11-24T08:31:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741850_1035 (size=5156) 2024-11-24T08:31:14,517 INFO [M:0;30c28c82771d:42583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ee7ba5814c5493f824993d7113ce32d 2024-11-24T08:31:14,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:31:14,530 INFO [RS:0;30c28c82771d:33921 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:31:14,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33921-0x101491957d30001, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:31:14,530 INFO [RS:0;30c28c82771d:33921 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,33921,1732437044607; zookeeper connection closed. 
2024-11-24T08:31:14,530 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@37b3684f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@37b3684f 2024-11-24T08:31:14,530 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:31:14,536 DEBUG [M:0;30c28c82771d:42583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/54913d77016246cfad1bed3e1110c31a is 52, key is load_balancer_on/state:d/1732437045534/Put/seqid=0 2024-11-24T08:31:14,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741851_1036 (size=5056) 2024-11-24T08:31:14,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741851_1036 (size=5056) 2024-11-24T08:31:14,541 INFO [M:0;30c28c82771d:42583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/54913d77016246cfad1bed3e1110c31a 2024-11-24T08:31:14,545 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:31:14,546 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b61ed245861484584b9a3e4d92d9cf3 as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b61ed245861484584b9a3e4d92d9cf3 2024-11-24T08:31:14,550 INFO [M:0;30c28c82771d:42583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b61ed245861484584b9a3e4d92d9cf3, entries=8, sequenceid=56, filesize=5.5 K 2024-11-24T08:31:14,551 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b432a1ed737941468164d542c35128ac as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b432a1ed737941468164d542c35128ac 2024-11-24T08:31:14,556 INFO [M:0;30c28c82771d:42583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b432a1ed737941468164d542c35128ac, entries=6, sequenceid=56, filesize=6.0 K 2024-11-24T08:31:14,557 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ee7ba5814c5493f824993d7113ce32d as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9ee7ba5814c5493f824993d7113ce32d 2024-11-24T08:31:14,562 INFO [M:0;30c28c82771d:42583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9ee7ba5814c5493f824993d7113ce32d, entries=1, sequenceid=56, filesize=5.0 K 2024-11-24T08:31:14,563 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/54913d77016246cfad1bed3e1110c31a as hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/54913d77016246cfad1bed3e1110c31a 2024-11-24T08:31:14,568 INFO [M:0;30c28c82771d:42583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/54913d77016246cfad1bed3e1110c31a, entries=1, sequenceid=56, filesize=4.9 K 2024-11-24T08:31:14,569 INFO [M:0;30c28c82771d:42583 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=56, compaction requested=false 2024-11-24T08:31:14,571 INFO [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:14,571 DEBUG [M:0;30c28c82771d:42583 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437074440Disabling compacts and flushes for region at 1732437074440Disabling writes for close at 1732437074440Obtaining lock to block concurrent updates at 1732437074440Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732437074440Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1732437074441 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732437074447 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732437074447Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732437074462 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732437074462Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732437074473 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732437074487 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732437074487Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732437074497 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732437074512 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732437074512Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732437074522 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732437074536 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732437074536Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d72180c: reopening flushed file at 1732437074545 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14618d50: reopening flushed file at 1732437074551 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66353dfc: reopening flushed file at 1732437074556 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@525e073a: reopening flushed file at 1732437074562 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=56, compaction requested=false at 1732437074569 (+7 ms)Writing region close event to WAL at 1732437074571 (+2 ms)Closed at 1732437074571 2024-11-24T08:31:14,572 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,572 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,572 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,572 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,572 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:14,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33811 is added to blk_1073741847_1031 (size=757) 2024-11-24T08:31:14,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741847_1031 (size=757) 2024-11-24T08:31:15,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39995 is added to blk_1073741830_1032 (size=27247) 2024-11-24T08:31:15,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:15,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:15,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:15,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,049 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:31:16,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:16,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:16,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:17,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:17,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:17,379 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-24T08:31:18,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:18,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:18,448 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 after 4001ms 2024-11-24T08:31:18,448 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/WALs/30c28c82771d,42583,1732437044562/30c28c82771d%2C42583%2C1732437044562.1732437044691 to hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/oldWALs/30c28c82771d%2C42583%2C1732437044562.1732437044691 2024-11-24T08:31:18,451 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/MasterData/oldWALs/30c28c82771d%2C42583%2C1732437044562.1732437044691 to hdfs://localhost:40387/user/jenkins/test-data/6dfa0ded-1bc8-1e78-3595-f14474392f47/oldWALs/30c28c82771d%2C42583%2C1732437044562.1732437044691$masterlocalwal$ 2024-11-24T08:31:18,452 INFO [M:0;30c28c82771d:42583 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:31:18,452 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:31:18,452 INFO [M:0;30c28c82771d:42583 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42583 2024-11-24T08:31:18,452 INFO [M:0;30c28c82771d:42583 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:31:18,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:31:18,554 INFO [M:0;30c28c82771d:42583 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:31:18,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42583-0x101491957d30000, quorum=127.0.0.1:51402, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:31:18,556 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@787041af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:18,556 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7269a538{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:31:18,556 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:31:18,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64e298ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:31:18,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a09209{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:31:18,558 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:31:18,558 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:31:18,558 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:31:18,558 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594007340-172.17.0.2-1732437043875 (Datanode Uuid acbfbffc-88f0-4344-8543-d929848a9b72) service to localhost/127.0.0.1:40387 2024-11-24T08:31:18,558 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data3/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:18,559 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data4/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:18,559 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:31:18,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aaed393{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:18,561 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7939cb3e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:31:18,561 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:31:18,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f94f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:31:18,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ae00177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:31:18,562 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:31:18,562 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:31:18,562 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:31:18,562 WARN [BP-1594007340-172.17.0.2-1732437043875 heartbeating to localhost/127.0.0.1:40387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594007340-172.17.0.2-1732437043875 (Datanode Uuid ab85ff76-27bc-469c-99a2-c05f1573df17) service to localhost/127.0.0.1:40387 2024-11-24T08:31:18,563 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data1/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:18,563 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/cluster_62170018-0ac2-279e-461e-d3edd4af842b/data/data2/current/BP-1594007340-172.17.0.2-1732437043875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:31:18,563 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:31:18,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e97eedf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:31:18,569 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f4376e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:31:18,569 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:31:18,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@302bad64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:31:18,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e84569b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir/,STOPPED} 2024-11-24T08:31:18,575 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:31:18,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:31:18,600 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 155) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40387 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:40387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=456 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=113 (was 191), ProcessCount=11 (was 11), AvailableMemoryMB=6622 (was 6810) 2024-11-24T08:31:18,607 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=113, ProcessCount=11, AvailableMemoryMB=6621 2024-11-24T08:31:18,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:31:18,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.log.dir so I do NOT create it in target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674 2024-11-24T08:31:18,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/787cb4ff-f6cc-44b9-bcfd-0e8ed53b20ce/hadoop.tmp.dir so I do NOT create it in target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674 2024-11-24T08:31:18,607 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24, deleteOnExit=true 2024-11-24T08:31:18,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/test.cache.data in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:31:18,608 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:31:18,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:31:18,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:31:18,622 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:31:18,687 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:31:18,691 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:31:18,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:31:18,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:31:18,692 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:31:18,693 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:31:18,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2712345{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:31:18,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6df6a7f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:31:18,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b4ea813{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/java.io.tmpdir/jetty-localhost-43527-hadoop-hdfs-3_4_1-tests_jar-_-any-17910774019018838144/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:31:18,807 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@113ad868{HTTP/1.1, (http/1.1)}{localhost:43527} 2024-11-24T08:31:18,807 INFO [Time-limited test {}] server.Server(415): Started @184930ms 2024-11-24T08:31:18,820 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:31:18,871 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:31:18,873 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:31:18,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:31:18,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:31:18,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:31:18,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@df163d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:31:18,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1367dc96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:31:18,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@206f042f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/java.io.tmpdir/jetty-localhost-37425-hadoop-hdfs-3_4_1-tests_jar-_-any-12048449291800407091/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:18,990 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@374dfdaf{HTTP/1.1, (http/1.1)}{localhost:37425} 2024-11-24T08:31:18,990 INFO [Time-limited test {}] server.Server(415): Started @185113ms 2024-11-24T08:31:18,991 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:31:19,021 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:31:19,023 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:31:19,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:31:19,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:31:19,024 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:31:19,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@507832d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:31:19,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9b25e94{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:31:19,096 WARN [Thread-1636 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data1/current/BP-1368030800-172.17.0.2-1732437078639/current, will proceed with Du for space computation calculation, 2024-11-24T08:31:19,096 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data2/current/BP-1368030800-172.17.0.2-1732437078639/current, will proceed with Du for space computation calculation, 2024-11-24T08:31:19,112 WARN [Thread-1615 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:31:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6c947f8c906ba28 with lease ID 0x1ccb7c292f37cdf8: Processing first storage report for DS-f67ad1cb-2239-4f5a-bb4d-7fb82ac0804d from datanode DatanodeRegistration(127.0.0.1:41107, datanodeUuid=1e7fa05b-04db-40ea-ae37-b44b861b9dc6, infoPort=35759, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639) 2024-11-24T08:31:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6c947f8c906ba28 with lease ID 0x1ccb7c292f37cdf8: from storage DS-f67ad1cb-2239-4f5a-bb4d-7fb82ac0804d node DatanodeRegistration(127.0.0.1:41107, datanodeUuid=1e7fa05b-04db-40ea-ae37-b44b861b9dc6, infoPort=35759, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:31:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6c947f8c906ba28 with lease ID 0x1ccb7c292f37cdf8: Processing first storage report for DS-74f2bdd0-a54c-4309-b566-4db24d443e2b from datanode DatanodeRegistration(127.0.0.1:41107, datanodeUuid=1e7fa05b-04db-40ea-ae37-b44b861b9dc6, infoPort=35759, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639) 2024-11-24T08:31:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6c947f8c906ba28 with lease ID 0x1ccb7c292f37cdf8: from storage DS-74f2bdd0-a54c-4309-b566-4db24d443e2b node DatanodeRegistration(127.0.0.1:41107, datanodeUuid=1e7fa05b-04db-40ea-ae37-b44b861b9dc6, infoPort=35759, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:19,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aa33ca4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/java.io.tmpdir/jetty-localhost-34971-hadoop-hdfs-3_4_1-tests_jar-_-any-2391606914891155716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:31:19,146 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3be31a0b{HTTP/1.1, (http/1.1)}{localhost:34971} 2024-11-24T08:31:19,146 INFO [Time-limited test {}] server.Server(415): Started @185269ms 2024-11-24T08:31:19,147 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:31:19,242 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data3/current/BP-1368030800-172.17.0.2-1732437078639/current, will proceed with Du for space computation calculation, 2024-11-24T08:31:19,242 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data4/current/BP-1368030800-172.17.0.2-1732437078639/current, will proceed with Du for space computation calculation, 2024-11-24T08:31:19,264 WARN [Thread-1651 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:31:19,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58845f12865f065a with lease ID 0x1ccb7c292f37cdf9: Processing first storage report for DS-f4141c68-b4d7-4b52-aaf7-d57b3843b593 from datanode DatanodeRegistration(127.0.0.1:41379, datanodeUuid=e2934c93-d81b-4027-84bc-3fc9862443fa, infoPort=39209, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639) 2024-11-24T08:31:19,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58845f12865f065a with lease ID 0x1ccb7c292f37cdf9: from storage DS-f4141c68-b4d7-4b52-aaf7-d57b3843b593 node DatanodeRegistration(127.0.0.1:41379, datanodeUuid=e2934c93-d81b-4027-84bc-3fc9862443fa, infoPort=39209, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:19,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58845f12865f065a with lease ID 0x1ccb7c292f37cdf9: Processing first storage report for DS-06443f92-8ca4-4e43-b4b0-10250ab97d86 from datanode DatanodeRegistration(127.0.0.1:41379, datanodeUuid=e2934c93-d81b-4027-84bc-3fc9862443fa, infoPort=39209, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639) 2024-11-24T08:31:19,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58845f12865f065a with lease ID 0x1ccb7c292f37cdf9: from storage DS-06443f92-8ca4-4e43-b4b0-10250ab97d86 node DatanodeRegistration(127.0.0.1:41379, datanodeUuid=e2934c93-d81b-4027-84bc-3fc9862443fa, infoPort=39209, infoSecurePort=0, ipcPort=43815, storageInfo=lv=-57;cid=testClusterID;nsid=2106693921;c=1732437078639), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:31:19,269 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674 2024-11-24T08:31:19,271 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/zookeeper_0, clientPort=51285, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:31:19,272 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51285 2024-11-24T08:31:19,272 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:31:19,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:31:19,284 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f with version=8 2024-11-24T08:31:19,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:31:19,286 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:31:19,286 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37865 2024-11-24T08:31:19,288 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37865 connecting to ZooKeeper ensemble=127.0.0.1:51285 2024-11-24T08:31:19,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:378650x0, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:31:19,293 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37865-0x1014919df760000 connected 2024-11-24T08:31:19,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:31:19,312 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f, hbase.cluster.distributed=false 2024-11-24T08:31:19,314 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:31:19,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37865 2024-11-24T08:31:19,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37865 2024-11-24T08:31:19,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37865 2024-11-24T08:31:19,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37865 2024-11-24T08:31:19,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37865 2024-11-24T08:31:19,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:19,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:19,330 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:31:19,330 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:31:19,331 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41593 2024-11-24T08:31:19,332 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41593 connecting to ZooKeeper ensemble=127.0.0.1:51285 2024-11-24T08:31:19,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415930x0, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:31:19,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:415930x0, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:31:19,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41593-0x1014919df760001 connected 2024-11-24T08:31:19,338 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
2024-11-24T08:31:19,339 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:31:19,339 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:31:19,340 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:31:19,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41593 2024-11-24T08:31:19,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41593 2024-11-24T08:31:19,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41593 2024-11-24T08:31:19,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41593 2024-11-24T08:31:19,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41593 2024-11-24T08:31:19,352 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:37865 2024-11-24T08:31:19,353 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:31:19,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:31:19,354 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:31:19,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,356 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:31:19,356 INFO 
[master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,37865,1732437079285 from backup master directory 2024-11-24T08:31:19,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:31:19,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:31:19,359 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:31:19,359 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,363 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/hbase.id] with ID: 06c34817-4860-4828-89bd-73f42e1cbafe 2024-11-24T08:31:19,363 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/.tmp/hbase.id 2024-11-24T08:31:19,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:31:19,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:31:19,369 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/.tmp/hbase.id]:[hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/hbase.id] 2024-11-24T08:31:19,380 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:19,380 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:31:19,381 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T08:31:19,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:31:19,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:31:19,389 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:31:19,390 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:31:19,390 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:31:19,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:31:19,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:31:19,397 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store 2024-11-24T08:31:19,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:31:19,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:31:19,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:31:19,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:31:19,403 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:19,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:19,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:31:19,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:31:19,403 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:31:19,403 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437079403Disabling compacts and flushes for region at 1732437079403Disabling writes for close at 1732437079403Writing region close event to WAL at 1732437079403Closed at 1732437079403 2024-11-24T08:31:19,404 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/.initializing 2024-11-24T08:31:19,404 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/WALs/30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,406 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C37865%2C1732437079285, suffix=, logDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/WALs/30c28c82771d,37865,1732437079285, archiveDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/oldWALs, maxLogs=10 2024-11-24T08:31:19,407 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C37865%2C1732437079285.1732437079406 2024-11-24T08:31:19,411 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/WALs/30c28c82771d,37865,1732437079285/30c28c82771d%2C37865%2C1732437079285.1732437079406 2024-11-24T08:31:19,412 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35759:35759),(127.0.0.1/127.0.0.1:39209:39209)] 2024-11-24T08:31:19,412 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:31:19,412 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:31:19,413 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,413 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,414 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:31:19,415 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:19,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:31:19,416 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:31:19,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:31:19,418 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:31:19,419 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,420 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:31:19,420 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,420 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:31:19,420 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,421 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,421 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,422 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,422 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,423 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:31:19,424 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:31:19,426 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:31:19,426 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718705, jitterRate=-0.08611972630023956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:31:19,427 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732437079413Initializing all the Stores at 1732437079413Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437079413Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437079414 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437079414Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437079414Cleaning up temporary data from old regions at 1732437079422 (+8 ms)Region opened successfully at 1732437079427 (+5 ms) 2024-11-24T08:31:19,427 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:31:19,429 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51336322, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:31:19,430 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:31:19,430 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:31:19,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:31:19,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:31:19,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:31:19,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:31:19,431 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:31:19,433 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:31:19,434 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:31:19,435 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:31:19,435 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:31:19,436 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:31:19,437 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:31:19,437 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:31:19,438 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:31:19,445 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:31:19,446 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:31:19,447 DEBUG 
[master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:31:19,448 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:31:19,449 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:31:19,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:31:19,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:31:19,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,451 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,37865,1732437079285, sessionid=0x1014919df760000, setting cluster-up flag (Was=false) 2024-11-24T08:31:19,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,460 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:31:19,460 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,468 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:31:19,469 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,37865,1732437079285 2024-11-24T08:31:19,470 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:31:19,471 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:31:19,471 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:31:19,471 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:31:19,472 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,37865,1732437079285 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:31:19,473 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732437109474 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,474 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:31:19,474 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:31:19,475 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:31:19,475 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437079475,5,FailOnTimeoutGroup] 2024-11-24T08:31:19,475 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437079475,5,FailOnTimeoutGroup] 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,475 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,475 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,476 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:31:19,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:31:19,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:31:19,482 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:31:19,482 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f 2024-11-24T08:31:19,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:31:19,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:31:19,488 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:31:19,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:31:19,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:31:19,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:19,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:31:19,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:31:19,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:19,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:31:19,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:31:19,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:19,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:31:19,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:31:19,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:19,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:19,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:31:19,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740 2024-11-24T08:31:19,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740 2024-11-24T08:31:19,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:31:19,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:31:19,497 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
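The FlushLargeStoresPolicy lines in this section (32.0 M for master:store earlier, 16.0 M for hbase:meta just above) are consistent with a simple fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor, the per-family lower bound is taken as the region's memstore flush size divided by its number of column families. A minimal sketch of that arithmetic follows, assuming that is all the fallback does; the class and method names are illustrative, not HBase API.

    // Illustrative only: not HBase's actual class, just the arithmetic the log describes.
    public final class FlushLowerBoundSketch {
        static long fallbackLowerBound(long memstoreFlushSize, int numFamilies) {
            return memstoreFlushSize / numFamilies;
        }
        public static void main(String[] args) {
            // master:store: flushSize=134217728 (logged earlier), 4 families (info/proc/rs/state)
            System.out.println(fallbackLowerBound(134217728L, 4)); // 33554432 -> "32.0 M"
            // hbase:meta: 4 families (info/ns/rep_barrier/table); 16777216 -> "16.0 M";
            // this implies a 64 MB flush heap size for meta, which the log does not show directly.
            System.out.println(fallbackLowerBound(67108864L, 4));  // 16777216
        }
    }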
2024-11-24T08:31:19,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:31:19,499 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:31:19,499 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751407, jitterRate=-0.04453669488430023}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:31:19,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732437079488Initializing all the Stores at 1732437079489 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437079489Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437079489Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437079489Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437079489Cleaning up temporary data from old regions at 1732437079496 (+7 ms)Region opened successfully at 1732437079500 (+4 ms) 2024-11-24T08:31:19,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:31:19,500 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:31:19,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:31:19,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:31:19,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:31:19,501 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:31:19,501 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437079500Disabling compacts and flushes for region at 1732437079500Disabling writes for close at 1732437079500Writing region close 
event to WAL at 1732437079500Closed at 1732437079500 2024-11-24T08:31:19,502 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:31:19,502 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:31:19,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:31:19,503 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:31:19,505 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:31:19,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:31:19,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T08:31:19,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T08:31:19,543 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(746): ClusterId : 06c34817-4860-4828-89bd-73f42e1cbafe 2024-11-24T08:31:19,543 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:31:19,545 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:31:19,545 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:31:19,546 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:31:19,547 DEBUG [RS:0;30c28c82771d:41593 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@210d2a1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:31:19,559 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:41593 2024-11-24T08:31:19,559 INFO [RS:0;30c28c82771d:41593 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:31:19,559 INFO [RS:0;30c28c82771d:41593 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:31:19,559 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(832): About to register with Master. 
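The three SteppingSplitPolicy lines in this section (desiredMaxFileSize=718705 with jitterRate=-0.0861..., 751407 with -0.0445..., and 854077 with 0.0860... further below) all fit a single configured max file size of 786432 bytes if the jitter is applied as maxFileSize + (long)(maxFileSize * jitterRate). The formula is an assumption reconstructed from the logged pairs rather than taken from this log; the sketch below just checks the arithmetic.

    // Assumed relationship, verified against the three (desiredMaxFileSize, jitterRate)
    // pairs logged in this section; 786432 is the inferred configured max file size.
    public final class SplitSizeJitterSketch {
        static long withJitter(long maxFileSize, double jitterRate) {
            return maxFileSize + (long) (maxFileSize * jitterRate);
        }
        public static void main(String[] args) {
            long base = 786432L;
            System.out.println(withJitter(base, -0.08611972630023956)); // 718705
            System.out.println(withJitter(base, -0.04453669488430023)); // 751407
            System.out.println(withJitter(base,  0.08601538836956024)); // 854077
        }
    }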
2024-11-24T08:31:19,559 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,37865,1732437079285 with port=41593, startcode=1732437079330 2024-11-24T08:31:19,560 DEBUG [RS:0;30c28c82771d:41593 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:31:19,562 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45353, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:31:19,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37865 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37865 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,564 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f 2024-11-24T08:31:19,564 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44781 2024-11-24T08:31:19,564 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:31:19,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:31:19,567 DEBUG [RS:0;30c28c82771d:41593 {}] zookeeper.ZKUtil(111): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,567 WARN [RS:0;30c28c82771d:41593 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:31:19,567 INFO [RS:0;30c28c82771d:41593 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:31:19,567 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,567 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,41593,1732437079330] 2024-11-24T08:31:19,570 INFO [RS:0;30c28c82771d:41593 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:31:19,572 INFO [RS:0;30c28c82771d:41593 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:31:19,572 INFO [RS:0;30c28c82771d:41593 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:31:19,572 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
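The MemStoreFlusher line just above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M; 836 is exactly 95% of 880, which matches a lower-mark fraction of 0.95 (presumably hbase.regionserver.global.memstore.size.lower.limit, a value this log does not show). A small sketch of that relationship, with the 0.95 treated as an assumption:

    // Assumed relationship: low-water mark = global limit * lower-limit fraction (0.95 here).
    public final class MemStoreLimitSketch {
        public static void main(String[] args) {
            long globalLimitMB = 880L;         // globalMemStoreLimit from the log
            double lowerLimitFraction = 0.95;  // assumed default fraction, not logged
            long lowMarkMB = (long) (globalLimitMB * lowerLimitFraction);
            System.out.println(lowMarkMB);     // 836, matching the logged low mark
        }
    }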
2024-11-24T08:31:19,572 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:31:19,573 INFO [RS:0;30c28c82771d:41593 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:31:19,573 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,573 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,573 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,573 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,573 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,573 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,573 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:31:19,574 DEBUG [RS:0;30c28c82771d:41593 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:31:19,574 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
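Every RS_* executor above is started with corePoolSize equal to maxPoolSize, i.e. a fixed-size pool whose extra work simply queues. In plain java.util.concurrent terms that behaves roughly like the analogy below; this is not HBase's ExecutorService implementation, only an equivalent-sized standard pool.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Analogy only: core == max means the pool never grows or shrinks under load;
    // additional tasks wait in the queue, as with the RS_* executors logged above.
    public final class FixedPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            ThreadPoolExecutor rsOpenRegion =
                new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                                       new LinkedBlockingQueue<>());
            rsOpenRegion.submit(() -> System.out.println("open region task"));
            rsOpenRegion.shutdown();
            rsOpenRegion.awaitTermination(5, TimeUnit.SECONDS);
        }
    }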
2024-11-24T08:31:19,574 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,574 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,575 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,575 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,575 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,41593,1732437079330-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:31:19,589 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:31:19,589 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,41593,1732437079330-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,589 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,589 INFO [RS:0;30c28c82771d:41593 {}] regionserver.Replication(171): 30c28c82771d,41593,1732437079330 started 2024-11-24T08:31:19,603 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:19,603 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,41593,1732437079330, RpcServer on 30c28c82771d/172.17.0.2:41593, sessionid=0x1014919df760001 2024-11-24T08:31:19,603 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:31:19,603 DEBUG [RS:0;30c28c82771d:41593 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,603 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,41593,1732437079330' 2024-11-24T08:31:19,603 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:31:19,604 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:31:19,604 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:31:19,604 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:31:19,604 DEBUG [RS:0;30c28c82771d:41593 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,604 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,41593,1732437079330' 2024-11-24T08:31:19,604 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:31:19,605 DEBUG 
[RS:0;30c28c82771d:41593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:31:19,605 DEBUG [RS:0;30c28c82771d:41593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:31:19,605 INFO [RS:0;30c28c82771d:41593 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:31:19,605 INFO [RS:0;30c28c82771d:41593 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:31:19,655 WARN [30c28c82771d:37865 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:31:19,707 INFO [RS:0;30c28c82771d:41593 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C41593%2C1732437079330, suffix=, logDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330, archiveDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs, maxLogs=32 2024-11-24T08:31:19,707 INFO [RS:0;30c28c82771d:41593 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C41593%2C1732437079330.1732437079707 2024-11-24T08:31:19,713 INFO [RS:0;30c28c82771d:41593 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437079707 2024-11-24T08:31:19,717 DEBUG [RS:0;30c28c82771d:41593 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39209:39209),(127.0.0.1/127.0.0.1:35759:35759)] 2024-11-24T08:31:19,905 DEBUG [30c28c82771d:37865 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:31:19,906 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,41593,1732437079330 2024-11-24T08:31:19,907 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,41593,1732437079330, state=OPENING 2024-11-24T08:31:19,908 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:31:19,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:31:19,910 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:31:19,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,41593,1732437079330}] 2024-11-24T08:31:19,910 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-24T08:31:19,910 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:31:20,063 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:31:20,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55881, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:31:20,068 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:31:20,068 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:31:20,070 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C41593%2C1732437079330.meta, suffix=.meta, logDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330, archiveDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs, maxLogs=32 2024-11-24T08:31:20,070 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C41593%2C1732437079330.meta.1732437080070.meta 2024-11-24T08:31:20,075 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.meta.1732437080070.meta 2024-11-24T08:31:20,079 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39209:39209),(127.0.0.1/127.0.0.1:35759:35759)] 2024-11-24T08:31:20,080 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:31:20,080 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:31:20,080 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:31:20,080 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
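The two WAL files created in this section follow the pattern visible in the log itself: <prefix>.<creation timestamp><suffix>, giving 30c28c82771d%2C41593%2C1732437079330.1732437079707 (empty suffix) and 30c28c82771d%2C41593%2C1732437079330.meta.1732437080070.meta (suffix .meta). A tiny sketch of that assembly; the helper is illustrative, not HBase API.

    // Illustrative: reproduces the two WAL file names logged above from their parts.
    public final class WalNameSketch {
        static String walName(String prefix, long creationTs, String suffix) {
            return prefix + "." + creationTs + suffix;
        }
        public static void main(String[] args) {
            System.out.println(walName("30c28c82771d%2C41593%2C1732437079330",
                                       1732437079707L, ""));
            System.out.println(walName("30c28c82771d%2C41593%2C1732437079330.meta",
                                       1732437080070L, ".meta"));
        }
    }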
2024-11-24T08:31:20,080 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:31:20,080 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:31:20,081 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:31:20,081 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:31:20,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:31:20,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:31:20,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:20,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:20,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:31:20,084 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:31:20,084 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:20,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:20,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:31:20,085 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:31:20,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:20,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:31:20,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:31:20,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:31:20,086 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:20,087 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
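Each CompactionConfiguration line above carries the same selection parameters (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). Roughly, ratio-based selection admits a file into a minor compaction only when its size does not exceed the ratio times the combined size of the other candidate files; the sketch below shows that test under that assumption, while the ExploringCompactionPolicy named in the log does more than this single check.

    // Rough sketch of the ratio test implied by "ratio 1.200000" above; the actual
    // selection algorithm is more involved than this single predicate.
    public final class RatioTestSketch {
        static boolean passesRatio(long fileSize, long sumOfOtherFiles, double ratio) {
            return fileSize <= (long) (sumOfOtherFiles * ratio);
        }
        public static void main(String[] args) {
            // e.g. a 10 MB file next to 9 MB of smaller files passes with ratio 1.2
            System.out.println(passesRatio(10L << 20, 9L << 20, 1.2)); // true
            System.out.println(passesRatio(20L << 20, 9L << 20, 1.2)); // false
        }
    }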
2024-11-24T08:31:20,087 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:31:20,087 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740 2024-11-24T08:31:20,088 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740 2024-11-24T08:31:20,089 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:31:20,089 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:31:20,090 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:31:20,091 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:31:20,091 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854077, jitterRate=0.08601538836956024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:31:20,091 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:31:20,092 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732437080081Writing region info on filesystem at 1732437080081Initializing all the Stores at 1732437080081Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437080081Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437080082 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437080082Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437080082Cleaning up temporary data from old regions at 1732437080089 (+7 ms)Running coprocessor post-open hooks at 1732437080091 (+2 ms)Region opened successfully at 1732437080092 (+1 ms) 2024-11-24T08:31:20,093 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732437080063 2024-11-24T08:31:20,095 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:31:20,095 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:31:20,096 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,41593,1732437079330 2024-11-24T08:31:20,097 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,41593,1732437079330, state=OPEN 2024-11-24T08:31:20,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:31:20,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:31:20,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:31:20,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:31:20,102 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,41593,1732437079330 2024-11-24T08:31:20,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:31:20,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,41593,1732437079330 in 192 msec 2024-11-24T08:31:20,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:31:20,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-24T08:31:20,108 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:31:20,108 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:31:20,110 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:31:20,110 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,41593,1732437079330, seqNum=-1] 2024-11-24T08:31:20,110 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:31:20,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46131, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:31:20,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 645 msec 2024-11-24T08:31:20,117 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732437080117, completionTime=-1 2024-11-24T08:31:20,117 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:31:20,117 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:31:20,118 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:31:20,118 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732437140118 2024-11-24T08:31:20,118 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437200118 2024-11-24T08:31:20,118 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:31:20,119 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,37865,1732437079285-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:20,119 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,37865,1732437079285-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:20,119 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,37865,1732437079285-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:20,119 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:37865, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:31:20,119 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:20,119 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:31:20,121 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:31:20,122 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.763sec 2024-11-24T08:31:20,122 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:31:20,122 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:31:20,122 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:31:20,123 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:31:20,123 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:31:20,123 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,37865,1732437079285-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:31:20,123 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,37865,1732437079285-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:31:20,125 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:31:20,125 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:31:20,125 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,37865,1732437079285-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:31:20,143 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29c969a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:31:20,143 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,37865,-1 for getting cluster id 2024-11-24T08:31:20,143 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:31:20,145 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '06c34817-4860-4828-89bd-73f42e1cbafe' 2024-11-24T08:31:20,146 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:31:20,146 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "06c34817-4860-4828-89bd-73f42e1cbafe" 2024-11-24T08:31:20,146 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ae84afa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:31:20,146 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,37865,-1] 2024-11-24T08:31:20,146 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:31:20,146 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:31:20,147 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34762, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:31:20,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77b33594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:31:20,149 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:31:20,149 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,41593,1732437079330, seqNum=-1] 2024-11-24T08:31:20,150 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:31:20,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37936, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:31:20,152 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30c28c82771d,37865,1732437079285 2024-11-24T08:31:20,153 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:31:20,155 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:31:20,155 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:31:20,156 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 30c28c82771d,37865,1732437079285 2024-11-24T08:31:20,156 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4805cd46 2024-11-24T08:31:20,156 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:31:20,157 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34776, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:31:20,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:31:20,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T08:31:20,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:31:20,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:31:20,160 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:31:20,160 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:20,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-24T08:31:20,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:31:20,161 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:31:20,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741835_1011 (size=405) 2024-11-24T08:31:20,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741835_1011 (size=405) 2024-11-24T08:31:20,170 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ec120c4cd85635d252c388b9f9936a30, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f 2024-11-24T08:31:20,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741836_1012 (size=88) 2024-11-24T08:31:20,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741836_1012 (size=88) 2024-11-24T08:31:20,176 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:31:20,176 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing ec120c4cd85635d252c388b9f9936a30, disabling compactions & flushes 2024-11-24T08:31:20,176 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:20,176 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:20,176 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. after waiting 0 ms 2024-11-24T08:31:20,176 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
2024-11-24T08:31:20,176 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:20,176 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ec120c4cd85635d252c388b9f9936a30: Waiting for close lock at 1732437080176Disabling compacts and flushes for region at 1732437080176Disabling writes for close at 1732437080176Writing region close event to WAL at 1732437080176Closed at 1732437080176 2024-11-24T08:31:20,177 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:31:20,178 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732437080177"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732437080177"}]},"ts":"1732437080177"} 2024-11-24T08:31:20,180 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T08:31:20,181 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:31:20,181 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437080181"}]},"ts":"1732437080181"} 2024-11-24T08:31:20,183 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-24T08:31:20,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec120c4cd85635d252c388b9f9936a30, ASSIGN}] 2024-11-24T08:31:20,184 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec120c4cd85635d252c388b9f9936a30, ASSIGN 2024-11-24T08:31:20,185 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec120c4cd85635d252c388b9f9936a30, ASSIGN; state=OFFLINE, location=30c28c82771d,41593,1732437079330; forceNewPlan=false, retain=false 2024-11-24T08:31:20,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:20,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:20,336 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec120c4cd85635d252c388b9f9936a30, regionState=OPENING, regionLocation=30c28c82771d,41593,1732437079330 2024-11-24T08:31:20,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec120c4cd85635d252c388b9f9936a30, ASSIGN because future has completed 2024-11-24T08:31:20,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec120c4cd85635d252c388b9f9936a30, server=30c28c82771d,41593,1732437079330}] 2024-11-24T08:31:20,495 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
2024-11-24T08:31:20,495 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ec120c4cd85635d252c388b9f9936a30, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:31:20,496 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,496 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:31:20,496 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,496 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,497 INFO [StoreOpener-ec120c4cd85635d252c388b9f9936a30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,498 INFO [StoreOpener-ec120c4cd85635d252c388b9f9936a30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec120c4cd85635d252c388b9f9936a30 columnFamilyName info 2024-11-24T08:31:20,498 DEBUG [StoreOpener-ec120c4cd85635d252c388b9f9936a30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:31:20,499 INFO [StoreOpener-ec120c4cd85635d252c388b9f9936a30-1 {}] regionserver.HStore(327): Store=ec120c4cd85635d252c388b9f9936a30/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:31:20,499 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,499 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,500 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,500 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,500 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,501 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,503 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:31:20,503 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ec120c4cd85635d252c388b9f9936a30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878251, jitterRate=0.11675478518009186}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:31:20,503 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:31:20,504 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ec120c4cd85635d252c388b9f9936a30: Running coprocessor pre-open hook at 1732437080496Writing region info on filesystem at 1732437080496Initializing all the Stores at 1732437080497 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437080497Cleaning up temporary data from old regions at 1732437080500 (+3 ms)Running coprocessor post-open hooks at 1732437080503 (+3 ms)Region opened successfully at 1732437080504 (+1 ms) 2024-11-24T08:31:20,505 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30., pid=6, masterSystemTime=1732437080491 2024-11-24T08:31:20,507 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:20,507 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:20,508 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec120c4cd85635d252c388b9f9936a30, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,41593,1732437079330 2024-11-24T08:31:20,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec120c4cd85635d252c388b9f9936a30, server=30c28c82771d,41593,1732437079330 because future has completed 2024-11-24T08:31:20,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:31:20,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ec120c4cd85635d252c388b9f9936a30, server=30c28c82771d,41593,1732437079330 in 172 msec 2024-11-24T08:31:20,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:31:20,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec120c4cd85635d252c388b9f9936a30, ASSIGN in 330 msec 2024-11-24T08:31:20,517 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:31:20,517 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437080517"}]},"ts":"1732437080517"} 2024-11-24T08:31:20,519 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-24T08:31:20,520 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:31:20,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 362 msec 2024-11-24T08:31:21,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:21,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:22,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:22,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:23,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:23,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:24,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:24,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:25,024 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:31:25,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:31:25,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:25,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:25,570 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:31:25,571 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-24T08:31:26,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:26,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:27,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:27,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:28,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:28,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:29,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:29,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T08:31:29,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-24T08:31:29,522 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-24T08:31:29,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-24T08:31:29,523 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-24T08:31:29,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:31:29,523 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-24T08:31:30,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-24T08:31:30,167 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-24T08:31:30,167 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-24T08:31:30,170 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:31:30,170 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.
2024-11-24T08:31:30,173 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30., hostname=30c28c82771d,41593,1732437079330, seqNum=2]
2024-11-24T08:31:30,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:31:30,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T08:31:30,186 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-24T08:31:30,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-24T08:31:30,187 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-24T08:31:30,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-24T08:31:30,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:30,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T08:31:30,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41593 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-24T08:31:30,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.
2024-11-24T08:31:30,349 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ec120c4cd85635d252c388b9f9936a30 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-24T08:31:30,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/7e7e47ad00b1455ca8ee665082fdaecb is 1080, key is row0001/info:/1732437090174/Put/seqid=0
2024-11-24T08:31:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741837_1013 (size=6033)
2024-11-24T08:31:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741837_1013 (size=6033)
2024-11-24T08:31:30,777 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/7e7e47ad00b1455ca8ee665082fdaecb
2024-11-24T08:31:30,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/7e7e47ad00b1455ca8ee665082fdaecb as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7e7e47ad00b1455ca8ee665082fdaecb
2024-11-24T08:31:30,789 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7e7e47ad00b1455ca8ee665082fdaecb, entries=1, sequenceid=5, filesize=5.9 K
2024-11-24T08:31:30,790 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec120c4cd85635d252c388b9f9936a30 in 441ms, sequenceid=5, compaction requested=false
2024-11-24T08:31:30,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush
status journal for ec120c4cd85635d252c388b9f9936a30: 2024-11-24T08:31:30,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:30,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-24T08:31:30,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-24T08:31:30,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T08:31:30,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 607 msec 2024-11-24T08:31:30,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 618 msec 2024-11-24T08:31:31,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:31,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:32,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:32,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:33,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:33,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:34,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:34,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:35,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:35,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:36,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:36,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:37,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:37,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:38,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:38,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:39,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:39,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T08:31:40,277 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:31:40,280 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:31:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:31:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T08:31:40,282 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T08:31:40,283 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T08:31:40,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T08:31:40,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:40,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:40,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41593 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-24T08:31:40,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:40,437 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing ec120c4cd85635d252c388b9f9936a30 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:31:40,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/1785f27d782b4447be146f493a406af5 is 1080, key is row0002/info:/1732437100278/Put/seqid=0 2024-11-24T08:31:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741838_1014 (size=6033) 2024-11-24T08:31:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741838_1014 (size=6033) 2024-11-24T08:31:40,447 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/1785f27d782b4447be146f493a406af5 2024-11-24T08:31:40,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/1785f27d782b4447be146f493a406af5 as 
hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/1785f27d782b4447be146f493a406af5 2024-11-24T08:31:40,457 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/1785f27d782b4447be146f493a406af5, entries=1, sequenceid=9, filesize=5.9 K 2024-11-24T08:31:40,458 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec120c4cd85635d252c388b9f9936a30 in 21ms, sequenceid=9, compaction requested=false 2024-11-24T08:31:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for ec120c4cd85635d252c388b9f9936a30: 2024-11-24T08:31:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:31:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-24T08:31:40,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-24T08:31:40,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T08:31:40,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-24T08:31:40,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-11-24T08:31:41,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:41,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:42,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:42,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:43,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:43,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:44,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:44,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:45,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:45,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:46,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:46,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:46,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:46,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta after 68035ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T08:31:47,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:47,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:48,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:48,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:49,269 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:31:49,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:49,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:50,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T08:31:50,327 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:31:50,330 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C41593%2C1732437079330.1732437110329 2024-11-24T08:31:50,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:50,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:50,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:50,335 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:50,335 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:31:50,335 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437079707 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437110329 2024-11-24T08:31:50,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39209:39209),(127.0.0.1/127.0.0.1:35759:35759)] 2024-11-24T08:31:50,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437079707 is not closed yet, will try archiving it next time 2024-11-24T08:31:50,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:31:50,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741833_1009 (size=5546) 2024-11-24T08:31:50,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741833_1009 (size=5546) 2024-11-24T08:31:50,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:31:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T08:31:50,339 
INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T08:31:50,340 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T08:31:50,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T08:31:50,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:50,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:50,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41593 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-24T08:31:50,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
2024-11-24T08:31:50,494 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing ec120c4cd85635d252c388b9f9936a30 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:31:50,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/91916fadd8d44c5a8af3e347c5fb050f is 1080, key is row0003/info:/1732437110328/Put/seqid=0 2024-11-24T08:31:50,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741840_1016 (size=6033) 2024-11-24T08:31:50,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741840_1016 (size=6033) 2024-11-24T08:31:50,503 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/91916fadd8d44c5a8af3e347c5fb050f 2024-11-24T08:31:50,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/91916fadd8d44c5a8af3e347c5fb050f as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/91916fadd8d44c5a8af3e347c5fb050f 2024-11-24T08:31:50,514 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/91916fadd8d44c5a8af3e347c5fb050f, entries=1, sequenceid=13, filesize=5.9 K 2024-11-24T08:31:50,515 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec120c4cd85635d252c388b9f9936a30 in 21ms, sequenceid=13, compaction requested=true 2024-11-24T08:31:50,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for ec120c4cd85635d252c388b9f9936a30: 2024-11-24T08:31:50,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
2024-11-24T08:31:50,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-24T08:31:50,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-24T08:31:50,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T08:31:50,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-24T08:31:50,521 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-11-24T08:31:51,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:51,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:52,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:52,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:53,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:53,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:54,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:54,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:55,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:55,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:56,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:56,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:57,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:57,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:58,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:58,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:31:59,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:31:59,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:00,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:00,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:00,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T08:32:00,437 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:32:00,437 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:00,439 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:00,439 DEBUG [Time-limited test {}] regionserver.HStore(1541): ec120c4cd85635d252c388b9f9936a30/info is initiating minor compaction (all files) 2024-11-24T08:32:00,439 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:32:00,439 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:00,439 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of ec120c4cd85635d252c388b9f9936a30/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:32:00,439 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7e7e47ad00b1455ca8ee665082fdaecb, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/1785f27d782b4447be146f493a406af5, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/91916fadd8d44c5a8af3e347c5fb050f] into tmpdir=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp, totalSize=17.7 K 2024-11-24T08:32:00,440 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7e7e47ad00b1455ca8ee665082fdaecb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732437090174 2024-11-24T08:32:00,440 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1785f27d782b4447be146f493a406af5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732437100278 2024-11-24T08:32:00,440 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 91916fadd8d44c5a8af3e347c5fb050f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732437110328 2024-11-24T08:32:00,451 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): ec120c4cd85635d252c388b9f9936a30#info#compaction#44 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:00,452 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/7898d0adaaa64affbe9a1df52a87610c is 1080, key is row0001/info:/1732437090174/Put/seqid=0 2024-11-24T08:32:00,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741841_1017 (size=8296) 2024-11-24T08:32:00,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741841_1017 (size=8296) 2024-11-24T08:32:00,464 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/7898d0adaaa64affbe9a1df52a87610c as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7898d0adaaa64affbe9a1df52a87610c 2024-11-24T08:32:00,470 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ec120c4cd85635d252c388b9f9936a30/info of ec120c4cd85635d252c388b9f9936a30 into 7898d0adaaa64affbe9a1df52a87610c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:00,470 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for ec120c4cd85635d252c388b9f9936a30: 2024-11-24T08:32:00,472 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C41593%2C1732437079330.1732437120472 2024-11-24T08:32:00,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:00,477 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:00,477 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:00,478 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:00,478 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:00,478 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437110329 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437120472 2024-11-24T08:32:00,478 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39209:39209),(127.0.0.1/127.0.0.1:35759:35759)] 2024-11-24T08:32:00,479 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437110329 is not closed yet, will try archiving it next time 2024-11-24T08:32:00,479 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437079707 to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs/30c28c82771d%2C41593%2C1732437079330.1732437079707 2024-11-24T08:32:00,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741839_1015 (size=2520) 2024-11-24T08:32:00,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:32:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:32:00,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741839_1015 (size=2520) 2024-11-24T08:32:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T08:32:00,482 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T08:32:00,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T08:32:00,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T08:32:00,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41593 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-24T08:32:00,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
2024-11-24T08:32:00,637 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing ec120c4cd85635d252c388b9f9936a30 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:32:00,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/db27271e07b9487287429e7dd0cb18f4 is 1080, key is row0000/info:/1732437120471/Put/seqid=0 2024-11-24T08:32:00,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741843_1019 (size=6033) 2024-11-24T08:32:00,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741843_1019 (size=6033) 2024-11-24T08:32:00,646 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/db27271e07b9487287429e7dd0cb18f4 2024-11-24T08:32:00,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/db27271e07b9487287429e7dd0cb18f4 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/db27271e07b9487287429e7dd0cb18f4 2024-11-24T08:32:00,657 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/db27271e07b9487287429e7dd0cb18f4, entries=1, sequenceid=18, filesize=5.9 K 2024-11-24T08:32:00,658 INFO [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec120c4cd85635d252c388b9f9936a30 in 21ms, sequenceid=18, compaction requested=false 2024-11-24T08:32:00,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for ec120c4cd85635d252c388b9f9936a30: 2024-11-24T08:32:00,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
2024-11-24T08:32:00,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-24T08:32:00,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-24T08:32:00,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T08:32:00,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-24T08:32:00,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-24T08:32:00,977 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:32:00,978 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T08:32:01,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:01,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:02,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:02,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:03,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:03,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:04,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:04,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:05,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:05,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:05,496 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec120c4cd85635d252c388b9f9936a30, had cached 0 bytes from a total of 14329
2024-11-24T08:32:06,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
2024-11-24T08:32:06,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
2024-11-24T08:32:07,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
2024-11-24T08:32:07,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
2024-11-24T08:32:08,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
2024-11-24T08:32:08,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
2024-11-24T08:32:09,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
2024-11-24T08:32:09,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
2024-11-24T08:32:10,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
2024-11-24T08:32:10,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
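The WARN entries above are retries of the same lease-recovery check: AbstractFSWAL.recoverLease hands the WAL path to RecoverLeaseFSUtils, which re-checks isFileClosed roughly once a second, and every attempt fails with java.io.IOException: Filesystem closed because the shared DFSClient has already been shut down. Below is a minimal sketch of that polling pattern against a plain Hadoop client; it is not RecoverLeaseFSUtils itself, and the endpoint, path, timeout, and poll interval are invented for illustration.

```java
// Hypothetical sketch of the lease-recovery polling seen in the log (not the
// actual RecoverLeaseFSUtils code). Uses only public DistributedFileSystem
// methods; the HDFS URI, WAL path, and timing values are placeholders.
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryRetryExample {

  /** Poll until the NameNode reports the file closed (lease recovered) or the deadline passes. */
  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pollMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);   // ask the NameNode to start lease recovery
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(pollMs);                      // the log shows roughly one retry per second
      // If the DFSClient has been closed (as in the log above), this throws
      // IOException("Filesystem closed") on every attempt.
      recovered = dfs.isFileClosed(wal);
      if (!recovered) {
        recovered = dfs.recoverLease(wal);
      }
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Invented endpoint and path; the test above used hdfs://localhost:45383 and WALs under /user/jenkins/test-data.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/WALs/example-wal");
    boolean ok = recoverLeaseWithRetries((DistributedFileSystem) fs, wal, 60_000L, 1_000L);
    System.out.println("lease recovered: " + ok);
  }
}
```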
2024-11-24T08:32:10,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T08:32:10,516 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T08:32:10,519 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C41593%2C1732437079330.1732437130519 2024-11-24T08:32:10,525 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,525 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,525 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,526 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437120472 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437130519 2024-11-24T08:32:10,526 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39209:39209),(127.0.0.1/127.0.0.1:35759:35759)] 2024-11-24T08:32:10,526 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437120472 is not closed yet, will try archiving it next time 2024-11-24T08:32:10,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:32:10,527 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437110329 to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs/30c28c82771d%2C41593%2C1732437079330.1732437110329 2024-11-24T08:32:10,527 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:32:10,527 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:32:10,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:32:10,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:32:10,527 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T08:32:10,527 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:32:10,527 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1615903126, stopped=false 2024-11-24T08:32:10,527 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,37865,1732437079285 2024-11-24T08:32:10,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741842_1018 (size=2026) 2024-11-24T08:32:10,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741842_1018 (size=2026) 2024-11-24T08:32:10,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:32:10,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:32:10,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:10,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:10,529 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:32:10,529 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:32:10,530 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:32:10,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:32:10,530 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:32:10,530 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,41593,1732437079330' ***** 2024-11-24T08:32:10,530 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:32:10,530 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:32:10,530 INFO [RS:0;30c28c82771d:41593 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:32:10,530 INFO [RS:0;30c28c82771d:41593 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:32:10,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:32:10,530 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(3091): Received CLOSE for ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:32:10,531 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,41593,1732437079330 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:41593. 2024-11-24T08:32:10,531 DEBUG [RS:0;30c28c82771d:41593 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:32:10,531 DEBUG [RS:0;30c28c82771d:41593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ec120c4cd85635d252c388b9f9936a30, disabling compactions & flushes 2024-11-24T08:32:10,531 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. after waiting 0 ms 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:32:10,531 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ec120c4cd85635d252c388b9f9936a30 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T08:32:10,531 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T08:32:10,531 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1325): Online Regions={ec120c4cd85635d252c388b9f9936a30=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:32:10,531 DEBUG [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ec120c4cd85635d252c388b9f9936a30 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:32:10,531 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:32:10,531 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:32:10,532 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-24T08:32:10,536 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/38ec474de9f64c4ca9d2cff81f7a4b73 is 1080, key is row0001/info:/1732437130517/Put/seqid=0 2024-11-24T08:32:10,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741845_1021 (size=6033) 2024-11-24T08:32:10,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741845_1021 (size=6033) 2024-11-24T08:32:10,542 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/38ec474de9f64c4ca9d2cff81f7a4b73 2024-11-24T08:32:10,549 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/.tmp/info/38ec474de9f64c4ca9d2cff81f7a4b73 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/38ec474de9f64c4ca9d2cff81f7a4b73 2024-11-24T08:32:10,551 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/info/589eba4f93bb4a2cbfd88f3c7a962d99 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30./info:regioninfo/1732437080508/Put/seqid=0 2024-11-24T08:32:10,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741846_1022 (size=7308) 2024-11-24T08:32:10,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741846_1022 (size=7308) 2024-11-24T08:32:10,556 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/38ec474de9f64c4ca9d2cff81f7a4b73, entries=1, sequenceid=22, filesize=5.9 K 2024-11-24T08:32:10,556 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/info/589eba4f93bb4a2cbfd88f3c7a962d99 2024-11-24T08:32:10,557 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec120c4cd85635d252c388b9f9936a30 in 26ms, sequenceid=22, compaction requested=true 2024-11-24T08:32:10,558 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7e7e47ad00b1455ca8ee665082fdaecb, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/1785f27d782b4447be146f493a406af5, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/91916fadd8d44c5a8af3e347c5fb050f] to archive 2024-11-24T08:32:10,558 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T08:32:10,560 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7e7e47ad00b1455ca8ee665082fdaecb to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/7e7e47ad00b1455ca8ee665082fdaecb 2024-11-24T08:32:10,561 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/1785f27d782b4447be146f493a406af5 to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/1785f27d782b4447be146f493a406af5 2024-11-24T08:32:10,563 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/91916fadd8d44c5a8af3e347c5fb050f to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/info/91916fadd8d44c5a8af3e347c5fb050f 2024-11-24T08:32:10,563 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30c28c82771d:37865 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-24T08:32:10,563 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7e7e47ad00b1455ca8ee665082fdaecb=6033, 1785f27d782b4447be146f493a406af5=6033, 91916fadd8d44c5a8af3e347c5fb050f=6033] 2024-11-24T08:32:10,567 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec120c4cd85635d252c388b9f9936a30/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-24T08:32:10,567 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 2024-11-24T08:32:10,568 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ec120c4cd85635d252c388b9f9936a30: Waiting for close lock at 1732437130531Running coprocessor pre-close hooks at 1732437130531Disabling compacts and flushes for region at 1732437130531Disabling writes for close at 1732437130531Obtaining lock to block concurrent updates at 1732437130531Preparing flush snapshotting stores in ec120c4cd85635d252c388b9f9936a30 at 1732437130531Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732437130531Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. at 1732437130532 (+1 ms)Flushing ec120c4cd85635d252c388b9f9936a30/info: creating writer at 1732437130532Flushing ec120c4cd85635d252c388b9f9936a30/info: appending metadata at 1732437130536 (+4 ms)Flushing ec120c4cd85635d252c388b9f9936a30/info: closing flushed file at 1732437130536Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d640b58: reopening flushed file at 1732437130548 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec120c4cd85635d252c388b9f9936a30 in 26ms, sequenceid=22, compaction requested=true at 1732437130557 (+9 ms)Writing region close event to WAL at 1732437130564 (+7 ms)Running coprocessor post-close hooks at 1732437130567 (+3 ms)Closed at 1732437130567 2024-11-24T08:32:10,568 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732437080157.ec120c4cd85635d252c388b9f9936a30. 
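The StoreCloser entries just above move each compacted store file out of the region's data directory into the matching path under archive/, keeping the table/region/family layout, and only afterwards try (and here fail, because the RPC client is already stopped) to report the archival to the master. A rough sketch of that move step using only the generic FileSystem API is below; it is not HBase's HFileArchiver, and the roots and file list are placeholders.

```java
// Hypothetical sketch of archiving compacted store files (not HFileArchiver):
// move each file from under dataRoot to the same relative location under archiveRoot.
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveCompactedFilesExample {

  static void archiveCompactedFiles(FileSystem fs, Path dataRoot, Path archiveRoot,
      List<Path> compactedFiles) throws IOException {
    String prefix = Path.getPathWithoutSchemeAndAuthority(dataRoot).toString();
    for (Path src : compactedFiles) {
      String full = Path.getPathWithoutSchemeAndAuthority(src).toString();
      if (!full.startsWith(prefix)) {
        throw new IOException(src + " is not under " + dataRoot);
      }
      // Keep the table/region/family/file layout under the archive root.
      Path dst = new Path(archiveRoot, full.substring(prefix.length() + 1));
      fs.mkdirs(dst.getParent());
      if (!fs.rename(src, dst)) {
        throw new IOException("Failed to move " + src + " to " + dst);
      }
    }
  }
}
```

On HDFS the rename is a metadata-only operation, so moving a compacted file into the archive directory is cheap compared to copying its blocks.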
2024-11-24T08:32:10,576 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T08:32:10,576 INFO [regionserver/30c28c82771d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T08:32:10,577 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/ns/3225fab42740434ab62d4052bd7e6b31 is 43, key is default/ns:d/1732437080112/Put/seqid=0 2024-11-24T08:32:10,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741847_1023 (size=5153) 2024-11-24T08:32:10,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741847_1023 (size=5153) 2024-11-24T08:32:10,583 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/ns/3225fab42740434ab62d4052bd7e6b31 2024-11-24T08:32:10,602 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/table/d3b99664d7a54552baa0df040a5ef467 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732437080517/Put/seqid=0 2024-11-24T08:32:10,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741848_1024 (size=5508) 2024-11-24T08:32:10,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741848_1024 (size=5508) 2024-11-24T08:32:10,608 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/table/d3b99664d7a54552baa0df040a5ef467 2024-11-24T08:32:10,613 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/info/589eba4f93bb4a2cbfd88f3c7a962d99 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/info/589eba4f93bb4a2cbfd88f3c7a962d99 2024-11-24T08:32:10,619 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/info/589eba4f93bb4a2cbfd88f3c7a962d99, entries=10, sequenceid=11, filesize=7.1 K 2024-11-24T08:32:10,620 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/ns/3225fab42740434ab62d4052bd7e6b31 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/ns/3225fab42740434ab62d4052bd7e6b31 2024-11-24T08:32:10,624 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/ns/3225fab42740434ab62d4052bd7e6b31, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T08:32:10,625 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/.tmp/table/d3b99664d7a54552baa0df040a5ef467 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/table/d3b99664d7a54552baa0df040a5ef467 2024-11-24T08:32:10,630 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/table/d3b99664d7a54552baa0df040a5ef467, entries=2, sequenceid=11, filesize=5.4 K 2024-11-24T08:32:10,631 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-24T08:32:10,636 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T08:32:10,637 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:32:10,637 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:32:10,637 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437130531Running coprocessor pre-close hooks at 1732437130531Disabling compacts and flushes for region at 1732437130531Disabling writes for close at 1732437130531Obtaining lock to block concurrent updates at 1732437130532 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732437130532Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732437130532Flushing stores of hbase:meta,,1.1588230740 at 1732437130533 (+1 ms)Flushing 1588230740/info: creating writer at 1732437130533Flushing 1588230740/info: appending metadata at 1732437130550 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732437130550Flushing 1588230740/ns: creating writer at 1732437130562 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732437130577 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732437130577Flushing 
1588230740/table: creating writer at 1732437130588 (+11 ms)Flushing 1588230740/table: appending metadata at 1732437130602 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732437130602Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49e654e8: reopening flushed file at 1732437130613 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7002ffc: reopening flushed file at 1732437130619 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23602f93: reopening flushed file at 1732437130625 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1732437130631 (+6 ms)Writing region close event to WAL at 1732437130633 (+2 ms)Running coprocessor post-close hooks at 1732437130637 (+4 ms)Closed at 1732437130637 2024-11-24T08:32:10,637 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:32:10,731 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,41593,1732437079330; all regions closed. 2024-11-24T08:32:10,732 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,732 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,732 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,732 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,732 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741834_1010 (size=3306) 2024-11-24T08:32:10,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741834_1010 (size=3306) 2024-11-24T08:32:10,737 DEBUG [RS:0;30c28c82771d:41593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs 2024-11-24T08:32:10,737 INFO [RS:0;30c28c82771d:41593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C41593%2C1732437079330.meta:.meta(num 1732437080070) 2024-11-24T08:32:10,738 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,738 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:10,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741844_1020 (size=1252) 2024-11-24T08:32:10,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741844_1020 (size=1252) 2024-11-24T08:32:10,929 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/WALs/30c28c82771d,41593,1732437079330/30c28c82771d%2C41593%2C1732437079330.1732437120472 to hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs/30c28c82771d%2C41593%2C1732437079330.1732437120472 2024-11-24T08:32:10,932 DEBUG [RS:0;30c28c82771d:41593 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/oldWALs 2024-11-24T08:32:10,932 INFO [RS:0;30c28c82771d:41593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C41593%2C1732437079330:(num 1732437130519) 2024-11-24T08:32:10,932 DEBUG [RS:0;30c28c82771d:41593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:32:10,932 INFO [RS:0;30c28c82771d:41593 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:32:10,932 INFO [RS:0;30c28c82771d:41593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:32:10,932 INFO [RS:0;30c28c82771d:41593 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:32:10,932 INFO [RS:0;30c28c82771d:41593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:32:10,932 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:32:10,932 INFO [RS:0;30c28c82771d:41593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41593 2024-11-24T08:32:10,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:32:10,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,41593,1732437079330 2024-11-24T08:32:10,935 INFO [RS:0;30c28c82771d:41593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:32:10,937 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,41593,1732437079330] 2024-11-24T08:32:10,938 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,41593,1732437079330 already deleted, retry=false 2024-11-24T08:32:10,938 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,41593,1732437079330 expired; onlineServers=0 2024-11-24T08:32:10,938 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,37865,1732437079285' ***** 2024-11-24T08:32:10,938 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:32:10,938 INFO [M:0;30c28c82771d:37865 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:32:10,938 INFO [M:0;30c28c82771d:37865 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:32:10,938 DEBUG [M:0;30c28c82771d:37865 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:32:10,938 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:32:10,938 DEBUG [M:0;30c28c82771d:37865 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:32:10,938 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437079475 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437079475,5,FailOnTimeoutGroup] 2024-11-24T08:32:10,938 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437079475 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437079475,5,FailOnTimeoutGroup] 2024-11-24T08:32:10,938 INFO [M:0;30c28c82771d:37865 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:32:10,938 INFO [M:0;30c28c82771d:37865 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:32:10,938 DEBUG [M:0;30c28c82771d:37865 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:32:10,939 INFO [M:0;30c28c82771d:37865 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:32:10,939 INFO [M:0;30c28c82771d:37865 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:32:10,939 INFO [M:0;30c28c82771d:37865 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:32:10,939 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:32:10,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:32:10,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:10,939 DEBUG [M:0;30c28c82771d:37865 {}] zookeeper.ZKUtil(347): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:32:10,940 WARN [M:0;30c28c82771d:37865 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:32:10,940 INFO [M:0;30c28c82771d:37865 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/.lastflushedseqids 2024-11-24T08:32:10,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741849_1025 (size=130) 2024-11-24T08:32:10,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741849_1025 (size=130) 2024-11-24T08:32:10,946 INFO [M:0;30c28c82771d:37865 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:32:10,946 INFO [M:0;30c28c82771d:37865 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:32:10,946 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:32:10,946 INFO [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:32:10,946 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:32:10,946 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:32:10,946 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:32:10,946 INFO [M:0;30c28c82771d:37865 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-24T08:32:10,962 DEBUG [M:0;30c28c82771d:37865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d118ef9849bb4151986c868fe4716fc6 is 82, key is hbase:meta,,1/info:regioninfo/1732437080096/Put/seqid=0 2024-11-24T08:32:10,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741850_1026 (size=5672) 2024-11-24T08:32:10,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741850_1026 (size=5672) 2024-11-24T08:32:10,968 INFO [M:0;30c28c82771d:37865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d118ef9849bb4151986c868fe4716fc6 2024-11-24T08:32:10,988 DEBUG [M:0;30c28c82771d:37865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3356529786fe4bd09a0e98c536dda79a is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732437080521/Put/seqid=0 2024-11-24T08:32:10,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741851_1027 (size=7818) 2024-11-24T08:32:10,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741851_1027 (size=7818) 2024-11-24T08:32:10,994 INFO [M:0;30c28c82771d:37865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3356529786fe4bd09a0e98c536dda79a 2024-11-24T08:32:10,998 INFO [M:0;30c28c82771d:37865 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3356529786fe4bd09a0e98c536dda79a 2024-11-24T08:32:11,013 DEBUG [M:0;30c28c82771d:37865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1effc22b5364803abdf28f29392c255 is 69, key is 30c28c82771d,41593,1732437079330/rs:state/1732437079563/Put/seqid=0 2024-11-24T08:32:11,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741852_1028 (size=5156) 2024-11-24T08:32:11,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741852_1028 (size=5156) 2024-11-24T08:32:11,019 INFO [M:0;30c28c82771d:37865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1effc22b5364803abdf28f29392c255 2024-11-24T08:32:11,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:32:11,037 INFO [RS:0;30c28c82771d:41593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:32:11,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41593-0x1014919df760001, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:32:11,037 INFO [RS:0;30c28c82771d:41593 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,41593,1732437079330; zookeeper connection closed. 2024-11-24T08:32:11,037 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@762d1d24 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@762d1d24 2024-11-24T08:32:11,037 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:32:11,038 DEBUG [M:0;30c28c82771d:37865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92e8537e137c4919bcf3800b7e3de092 is 52, key is load_balancer_on/state:d/1732437080154/Put/seqid=0 2024-11-24T08:32:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741853_1029 (size=5056) 2024-11-24T08:32:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741853_1029 (size=5056) 2024-11-24T08:32:11,044 INFO [M:0;30c28c82771d:37865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92e8537e137c4919bcf3800b7e3de092 2024-11-24T08:32:11,049 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d118ef9849bb4151986c868fe4716fc6 as 
hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d118ef9849bb4151986c868fe4716fc6 2024-11-24T08:32:11,054 INFO [M:0;30c28c82771d:37865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d118ef9849bb4151986c868fe4716fc6, entries=8, sequenceid=121, filesize=5.5 K 2024-11-24T08:32:11,055 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3356529786fe4bd09a0e98c536dda79a as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3356529786fe4bd09a0e98c536dda79a 2024-11-24T08:32:11,060 INFO [M:0;30c28c82771d:37865 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3356529786fe4bd09a0e98c536dda79a 2024-11-24T08:32:11,060 INFO [M:0;30c28c82771d:37865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3356529786fe4bd09a0e98c536dda79a, entries=14, sequenceid=121, filesize=7.6 K 2024-11-24T08:32:11,061 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1effc22b5364803abdf28f29392c255 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1effc22b5364803abdf28f29392c255 2024-11-24T08:32:11,065 INFO [M:0;30c28c82771d:37865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1effc22b5364803abdf28f29392c255, entries=1, sequenceid=121, filesize=5.0 K 2024-11-24T08:32:11,066 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92e8537e137c4919bcf3800b7e3de092 as hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/92e8537e137c4919bcf3800b7e3de092 2024-11-24T08:32:11,071 INFO [M:0;30c28c82771d:37865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44781/user/jenkins/test-data/a5cadc89-db72-dad1-729c-d7c77ab29a4f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/92e8537e137c4919bcf3800b7e3de092, entries=1, sequenceid=121, filesize=4.9 K 2024-11-24T08:32:11,072 INFO [M:0;30c28c82771d:37865 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=121, compaction requested=false 2024-11-24T08:32:11,073 INFO [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:32:11,073 DEBUG [M:0;30c28c82771d:37865 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437130946Disabling compacts and flushes for region at 1732437130946Disabling writes for close at 1732437130946Obtaining lock to block concurrent updates at 1732437130946Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732437130946Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1732437130947 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732437130947Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732437130948 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732437130962 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732437130962Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732437130973 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732437130987 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732437130988 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732437130998 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732437131013 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732437131013Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732437131024 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732437131038 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732437131038Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f4f441c: reopening flushed file at 1732437131049 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@188d99df: reopening flushed file at 1732437131055 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24901474: reopening flushed file at 1732437131061 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62c33732: reopening flushed file at 1732437131066 (+5 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=121, compaction requested=false at 1732437131072 (+6 ms)Writing region close event to WAL at 1732437131073 (+1 ms)Closed at 1732437131073 2024-11-24T08:32:11,074 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:11,074 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:11,074 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:11,074 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:11,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:32:11,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41379 is added to blk_1073741830_1006 (size=52987) 2024-11-24T08:32:11,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41107 is added to blk_1073741830_1006 (size=52987) 2024-11-24T08:32:11,077 INFO [M:0;30c28c82771d:37865 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T08:32:11,077 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:32:11,077 INFO [M:0;30c28c82771d:37865 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37865 2024-11-24T08:32:11,077 INFO [M:0;30c28c82771d:37865 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:32:11,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:32:11,179 INFO [M:0;30c28c82771d:37865 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:32:11,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37865-0x1014919df760000, quorum=127.0.0.1:51285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:32:11,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aa33ca4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:32:11,182 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3be31a0b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:32:11,182 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:32:11,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9b25e94{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:32:11,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@507832d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir/,STOPPED} 2024-11-24T08:32:11,184 WARN [BP-1368030800-172.17.0.2-1732437078639 heartbeating to localhost/127.0.0.1:44781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:32:11,184 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:32:11,184 WARN [BP-1368030800-172.17.0.2-1732437078639 heartbeating to localhost/127.0.0.1:44781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1368030800-172.17.0.2-1732437078639 (Datanode Uuid e2934c93-d81b-4027-84bc-3fc9862443fa) service to localhost/127.0.0.1:44781 2024-11-24T08:32:11,184 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:32:11,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data3/current/BP-1368030800-172.17.0.2-1732437078639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:32:11,185 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data4/current/BP-1368030800-172.17.0.2-1732437078639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:32:11,185 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:32:11,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@206f042f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:32:11,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@374dfdaf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:32:11,187 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:32:11,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1367dc96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:32:11,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@df163d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir/,STOPPED} 2024-11-24T08:32:11,189 WARN [BP-1368030800-172.17.0.2-1732437078639 heartbeating to localhost/127.0.0.1:44781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:32:11,189 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:32:11,189 WARN [BP-1368030800-172.17.0.2-1732437078639 heartbeating to localhost/127.0.0.1:44781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1368030800-172.17.0.2-1732437078639 (Datanode Uuid 1e7fa05b-04db-40ea-ae37-b44b861b9dc6) service to localhost/127.0.0.1:44781 2024-11-24T08:32:11,189 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:32:11,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data1/current/BP-1368030800-172.17.0.2-1732437078639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:32:11,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/cluster_97aa70e0-3c6b-5aad-a9f6-a6267a8d6d24/data/data2/current/BP-1368030800-172.17.0.2-1732437078639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:32:11,190 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:32:11,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b4ea813{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:32:11,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@113ad868{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:32:11,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:32:11,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6df6a7f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:32:11,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2712345{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir/,STOPPED} 2024-11-24T08:32:11,203 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:32:11,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:32:11,229 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44781 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:44781 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44781 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44781 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/30c28c82771d:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44781 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44781 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 456) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=48 (was 113), ProcessCount=11 (was 11), AvailableMemoryMB=6431 (was 6621) 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=48, ProcessCount=11, AvailableMemoryMB=6431 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.log.dir so I do NOT create it in target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de5b56e5-1f8c-7852-2f1f-17707ed08674/hadoop.tmp.dir so I do NOT create it in target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b, deleteOnExit=true 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/test.cache.data in system properties and HBase conf 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:32:11,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:32:11,238 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:32:11,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:32:11,252 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:32:11,314 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:32:11,319 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:32:11,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:32:11,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:32:11,320 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:32:11,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:32:11,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69d49ce9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:32:11,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2dd2b381{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:32:11,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:11,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:11,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63e2e387{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/java.io.tmpdir/jetty-localhost-42967-hadoop-hdfs-3_4_1-tests_jar-_-any-12908379745780716303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:32:11,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66ce6cc0{HTTP/1.1, (http/1.1)}{localhost:42967} 2024-11-24T08:32:11,436 INFO [Time-limited test {}] server.Server(415): Started @237559ms 2024-11-24T08:32:11,449 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:32:11,525 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:32:11,528 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:32:11,528 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:32:11,528 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:32:11,528 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:32:11,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cf57c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:32:11,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76e6f692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:32:11,577 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:32:11,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@3612be31{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/java.io.tmpdir/jetty-localhost-33367-hadoop-hdfs-3_4_1-tests_jar-_-any-9838132083426050116/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:32:11,644 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51585bde{HTTP/1.1, (http/1.1)}{localhost:33367} 2024-11-24T08:32:11,644 INFO [Time-limited test {}] server.Server(415): Started @237767ms 2024-11-24T08:32:11,645 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:32:11,673 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:32:11,675 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:32:11,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:32:11,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:32:11,676 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:32:11,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60b9b83d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:32:11,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b95b0ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:32:11,758 WARN [Thread-1953 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data2/current/BP-576311980-172.17.0.2-1732437131257/current, will proceed with Du for space computation calculation, 2024-11-24T08:32:11,758 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data1/current/BP-576311980-172.17.0.2-1732437131257/current, will proceed with Du for space computation calculation, 2024-11-24T08:32:11,782 WARN [Thread-1931 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:32:11,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ddf6478cbf82e31 with lease ID 0x7aef19fac75598c1: Processing first storage report for DS-cb7fda0d-ab91-4116-a5f7-cb54456152d7 from datanode DatanodeRegistration(127.0.0.1:33385, datanodeUuid=692e365f-e0a1-4a55-9cc6-7579731da366, infoPort=44795, infoSecurePort=0, ipcPort=34783, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257) 2024-11-24T08:32:11,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ddf6478cbf82e31 with lease ID 0x7aef19fac75598c1: from storage DS-cb7fda0d-ab91-4116-a5f7-cb54456152d7 node DatanodeRegistration(127.0.0.1:33385, datanodeUuid=692e365f-e0a1-4a55-9cc6-7579731da366, infoPort=44795, infoSecurePort=0, ipcPort=34783, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:32:11,785 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ddf6478cbf82e31 with lease ID 0x7aef19fac75598c1: Processing first storage report for DS-f3292741-4ff5-4e9e-8692-829b3bcddad5 from datanode DatanodeRegistration(127.0.0.1:33385, datanodeUuid=692e365f-e0a1-4a55-9cc6-7579731da366, infoPort=44795, infoSecurePort=0, ipcPort=34783, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257) 2024-11-24T08:32:11,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ddf6478cbf82e31 with lease ID 0x7aef19fac75598c1: from storage DS-f3292741-4ff5-4e9e-8692-829b3bcddad5 node DatanodeRegistration(127.0.0.1:33385, datanodeUuid=692e365f-e0a1-4a55-9cc6-7579731da366, infoPort=44795, infoSecurePort=0, ipcPort=34783, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T08:32:11,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77f3cd08{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/java.io.tmpdir/jetty-localhost-39433-hadoop-hdfs-3_4_1-tests_jar-_-any-1412573190621777257/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:32:11,810 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2401b6df{HTTP/1.1, (http/1.1)}{localhost:39433} 2024-11-24T08:32:11,810 INFO [Time-limited test {}] server.Server(415): Started @237932ms 2024-11-24T08:32:11,811 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
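The StartMiniClusterOption printed at the start of this test (1 master, 1 region server, 2 data nodes, 1 ZooKeeper server) and the DFS/datanode startup logged above correspond roughly to the test-utility call sketched below. This is a minimal, illustrative sketch only, not this test's actual source: HBaseTestingUtil and StartMiniClusterOption are the classes named in the log, but the exact builder method names and the startMiniCluster(option) signature are assumptions about this branch-3 test API.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the options logged by HBaseTestingUtil(805) above (builder names assumed).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, master and region server as logged
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();    // tears the cluster down and removes the temp data dirs
    }
  }
}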
2024-11-24T08:32:11,916 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data4/current/BP-576311980-172.17.0.2-1732437131257/current, will proceed with Du for space computation calculation, 2024-11-24T08:32:11,916 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data3/current/BP-576311980-172.17.0.2-1732437131257/current, will proceed with Du for space computation calculation, 2024-11-24T08:32:11,936 WARN [Thread-1967 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:32:11,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x513f4144f66ee8a0 with lease ID 0x7aef19fac75598c2: Processing first storage report for DS-f8a17384-2943-4eaa-8772-d205a7764c32 from datanode DatanodeRegistration(127.0.0.1:44775, datanodeUuid=629e8ff0-48a9-4f53-a549-0e7290ac3a96, infoPort=43051, infoSecurePort=0, ipcPort=39317, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257) 2024-11-24T08:32:11,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x513f4144f66ee8a0 with lease ID 0x7aef19fac75598c2: from storage DS-f8a17384-2943-4eaa-8772-d205a7764c32 node DatanodeRegistration(127.0.0.1:44775, datanodeUuid=629e8ff0-48a9-4f53-a549-0e7290ac3a96, infoPort=43051, infoSecurePort=0, ipcPort=39317, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:32:11,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x513f4144f66ee8a0 with lease ID 0x7aef19fac75598c2: Processing first storage report for DS-dbf79a2f-731d-44be-9db5-4d563225a8f2 from datanode DatanodeRegistration(127.0.0.1:44775, datanodeUuid=629e8ff0-48a9-4f53-a549-0e7290ac3a96, infoPort=43051, infoSecurePort=0, ipcPort=39317, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257) 2024-11-24T08:32:11,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x513f4144f66ee8a0 with lease ID 0x7aef19fac75598c2: from storage DS-dbf79a2f-731d-44be-9db5-4d563225a8f2 node DatanodeRegistration(127.0.0.1:44775, datanodeUuid=629e8ff0-48a9-4f53-a549-0e7290ac3a96, infoPort=43051, infoSecurePort=0, ipcPort=39317, storageInfo=lv=-57;cid=testClusterID;nsid=531963860;c=1732437131257), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:32:12,033 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc 2024-11-24T08:32:12,036 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/zookeeper_0, clientPort=51728, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:32:12,037 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51728 2024-11-24T08:32:12,037 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:32:12,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:32:12,047 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2 with version=8 2024-11-24T08:32:12,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:32:12,049 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:32:12,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:32:12,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:32:12,049 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:32:12,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:32:12,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:32:12,050 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:32:12,050 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:32:12,050 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40953 2024-11-24T08:32:12,051 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40953 connecting to ZooKeeper ensemble=127.0.0.1:51728 2024-11-24T08:32:12,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409530x0, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:32:12,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40953-0x101491aad930000 connected 2024-11-24T08:32:12,076 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,078 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,079 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:32:12,080 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2, hbase.cluster.distributed=false 2024-11-24T08:32:12,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:32:12,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40953 2024-11-24T08:32:12,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40953 2024-11-24T08:32:12,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40953 2024-11-24T08:32:12,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40953 2024-11-24T08:32:12,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40953 2024-11-24T08:32:12,097 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:32:12,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:32:12,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:32:12,097 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:32:12,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:32:12,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:32:12,098 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:32:12,098 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:32:12,098 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39061 2024-11-24T08:32:12,099 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39061 connecting to ZooKeeper ensemble=127.0.0.1:51728 2024-11-24T08:32:12,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390610x0, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:32:12,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390610x0, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:32:12,105 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39061-0x101491aad930001 connected 2024-11-24T08:32:12,105 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:32:12,106 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:32:12,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:32:12,107 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:32:12,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39061 2024-11-24T08:32:12,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39061 2024-11-24T08:32:12,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39061 2024-11-24T08:32:12,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39061 2024-11-24T08:32:12,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39061 2024-11-24T08:32:12,120 
DEBUG [M:0;30c28c82771d:40953 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:40953 2024-11-24T08:32:12,120 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:32:12,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:32:12,122 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:32:12,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,124 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:32:12,124 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,40953,1732437132049 from backup master directory 2024-11-24T08:32:12,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:32:12,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:32:12,127 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
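The ZKUtil lines above repeatedly log "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master, followed by NodeCreated/NodeChildrenChanged events once the master creates them. The sketch below shows that same pattern with the stock Apache ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the ensemble address is the one from this log, and the code is illustrative only.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the mini ZK ensemble the test started (127.0.0.1:51728 in this log).
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51728", 30_000, watcher);

    // exists() registers the watch even when the node is absent, so a later
    // NodeCreated event (e.g. /hbase/running appearing when the master comes up)
    // is delivered to the watcher above.
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("/hbase/running not created yet, watch registered");
    }
    zk.close();
  }
}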
2024-11-24T08:32:12,127 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,131 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/hbase.id] with ID: 4dcae763-9b21-4e5a-af43-958ab6619c1c 2024-11-24T08:32:12,131 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/.tmp/hbase.id 2024-11-24T08:32:12,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:32:12,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:32:12,137 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/.tmp/hbase.id]:[hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/hbase.id] 2024-11-24T08:32:12,147 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,147 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:32:12,148 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T08:32:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:32:12,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:32:12,156 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:32:12,157 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:32:12,157 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:32:12,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:32:12,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:32:12,164 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store 2024-11-24T08:32:12,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:32:12,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:32:12,170 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:12,170 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:32:12,170 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:32:12,170 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:32:12,170 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:32:12,170 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:32:12,170 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:32:12,170 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437132170Disabling compacts and flushes for region at 1732437132170Disabling writes for close at 1732437132170Writing region close event to WAL at 1732437132170Closed at 1732437132170 2024-11-24T08:32:12,171 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/.initializing 2024-11-24T08:32:12,171 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/WALs/30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,173 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C40953%2C1732437132049, suffix=, logDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/WALs/30c28c82771d,40953,1732437132049, archiveDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/oldWALs, maxLogs=10 2024-11-24T08:32:12,173 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C40953%2C1732437132049.1732437132173 2024-11-24T08:32:12,177 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/WALs/30c28c82771d,40953,1732437132049/30c28c82771d%2C40953%2C1732437132049.1732437132173 2024-11-24T08:32:12,180 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43051:43051),(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-24T08:32:12,181 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:32:12,181 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:12,181 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,181 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:32:12,183 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:32:12,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:32:12,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:32:12,186 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:32:12,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:32:12,187 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:32:12,188 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,188 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,188 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,189 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,189 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,190 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:32:12,191 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:32:12,192 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:32:12,193 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749078, jitterRate=-0.04749903082847595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:32:12,193 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732437132181Initializing all the Stores at 1732437132182 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132182Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437132182Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437132182Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437132182Cleaning up temporary data from old regions at 1732437132189 (+7 ms)Region opened successfully at 1732437132193 (+4 ms) 2024-11-24T08:32:12,193 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:32:12,196 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586853c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:32:12,197 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:32:12,197 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:32:12,197 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:32:12,197 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:32:12,197 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:32:12,197 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:32:12,198 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:32:12,199 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:32:12,200 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:32:12,201 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:32:12,201 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:32:12,202 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:32:12,203 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:32:12,203 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:32:12,204 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:32:12,206 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:32:12,206 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:32:12,207 DEBUG 
[master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:32:12,209 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:32:12,210 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:32:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:32:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:32:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,212 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,40953,1732437132049, sessionid=0x101491aad930000, setting cluster-up flag (Was=false) 2024-11-24T08:32:12,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,221 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:32:12,221 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,229 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:32:12,230 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,231 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:32:12,233 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:32:12,233 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:32:12,233 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:32:12,233 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,40953,1732437132049 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:32:12,234 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732437162235 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:32:12,236 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:32:12,236 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:32:12,236 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-24T08:32:12,237 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:32:12,237 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:32:12,237 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:32:12,237 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,237 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:32:12,237 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:32:12,237 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:32:12,240 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437132237,5,FailOnTimeoutGroup] 2024-11-24T08:32:12,240 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437132240,5,FailOnTimeoutGroup] 2024-11-24T08:32:12,240 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,240 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:32:12,240 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,241 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:32:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:32:12,246 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:32:12,246 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2 2024-11-24T08:32:12,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:32:12,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:32:12,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:12,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:32:12,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:32:12,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:32:12,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:32:12,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:32:12,259 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:32:12,260 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:32:12,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:32:12,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:32:12,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740 2024-11-24T08:32:12,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740 2024-11-24T08:32:12,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:32:12,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:32:12,264 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:32:12,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:32:12,266 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:32:12,267 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700097, jitterRate=-0.10978095233440399}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:32:12,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732437132254Initializing all the Stores at 1732437132255 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132255Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132255Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437132255Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132255Cleaning up temporary data from old regions at 1732437132263 (+8 ms)Region opened successfully at 1732437132267 (+4 ms) 2024-11-24T08:32:12,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:32:12,267 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:32:12,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:32:12,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:32:12,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:32:12,268 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:32:12,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437132267Disabling compacts and flushes for region at 
1732437132267Disabling writes for close at 1732437132267Writing region close event to WAL at 1732437132268 (+1 ms)Closed at 1732437132268 2024-11-24T08:32:12,269 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:32:12,269 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:32:12,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:32:12,270 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:32:12,271 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:32:12,310 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(746): ClusterId : 4dcae763-9b21-4e5a-af43-958ab6619c1c 2024-11-24T08:32:12,310 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:32:12,312 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:32:12,312 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:32:12,314 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:32:12,314 DEBUG [RS:0;30c28c82771d:39061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f735205, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:32:12,326 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:39061 2024-11-24T08:32:12,326 INFO [RS:0;30c28c82771d:39061 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:32:12,326 INFO [RS:0;30c28c82771d:39061 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:32:12,326 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:32:12,327 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,40953,1732437132049 with port=39061, startcode=1732437132097 2024-11-24T08:32:12,327 DEBUG [RS:0;30c28c82771d:39061 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:32:12,329 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40293, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:32:12,330 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40953 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,330 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40953 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,331 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2 2024-11-24T08:32:12,331 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39473 2024-11-24T08:32:12,331 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:32:12,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:32:12,338 DEBUG [RS:0;30c28c82771d:39061 {}] zookeeper.ZKUtil(111): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,338 WARN [RS:0;30c28c82771d:39061 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:32:12,338 INFO [RS:0;30c28c82771d:39061 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:32:12,338 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,339 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,39061,1732437132097] 2024-11-24T08:32:12,342 INFO [RS:0;30c28c82771d:39061 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:32:12,343 INFO [RS:0;30c28c82771d:39061 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:32:12,344 INFO [RS:0;30c28c82771d:39061 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:32:12,344 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:32:12,344 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:32:12,345 INFO [RS:0;30c28c82771d:39061 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:32:12,345 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:32:12,345 DEBUG [RS:0;30c28c82771d:39061 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:32:12,346 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:32:12,346 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,346 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,346 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,346 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,346 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,39061,1732437132097-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:32:12,361 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:32:12,361 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,39061,1732437132097-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,361 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:12,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:12,361 INFO [RS:0;30c28c82771d:39061 {}] regionserver.Replication(171): 30c28c82771d,39061,1732437132097 started 2024-11-24T08:32:12,375 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:32:12,375 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,39061,1732437132097, RpcServer on 30c28c82771d/172.17.0.2:39061, sessionid=0x101491aad930001 2024-11-24T08:32:12,375 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:32:12,375 DEBUG [RS:0;30c28c82771d:39061 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,375 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,39061,1732437132097' 2024-11-24T08:32:12,375 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:32:12,376 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:32:12,376 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:32:12,376 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:32:12,376 DEBUG [RS:0;30c28c82771d:39061 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,376 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,39061,1732437132097' 2024-11-24T08:32:12,376 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:32:12,377 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:32:12,377 DEBUG [RS:0;30c28c82771d:39061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:32:12,377 INFO [RS:0;30c28c82771d:39061 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:32:12,377 INFO [RS:0;30c28c82771d:39061 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:32:12,422 WARN [30c28c82771d:40953 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-24T08:32:12,479 INFO [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C39061%2C1732437132097, suffix=, logDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097, archiveDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/oldWALs, maxLogs=32 2024-11-24T08:32:12,479 INFO [RS:0;30c28c82771d:39061 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C39061%2C1732437132097.1732437132479 2024-11-24T08:32:12,485 INFO [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097/30c28c82771d%2C39061%2C1732437132097.1732437132479 2024-11-24T08:32:12,486 DEBUG [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43051:43051),(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-24T08:32:12,672 DEBUG [30c28c82771d:40953 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:32:12,672 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,674 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,39061,1732437132097, state=OPENING 2024-11-24T08:32:12,675 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:32:12,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:32:12,677 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:32:12,677 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:32:12,677 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:32:12,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,39061,1732437132097}] 2024-11-24T08:32:12,830 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:32:12,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46187, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:32:12,835 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:32:12,836 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:32:12,837 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C39061%2C1732437132097.meta, suffix=.meta, logDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097, archiveDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/oldWALs, maxLogs=32 2024-11-24T08:32:12,837 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C39061%2C1732437132097.meta.1732437132837.meta 2024-11-24T08:32:12,843 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097/30c28c82771d%2C39061%2C1732437132097.meta.1732437132837.meta 2024-11-24T08:32:12,844 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44795:44795),(127.0.0.1/127.0.0.1:43051:43051)] 2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:32:12,845 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:32:12,845 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:32:12,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:32:12,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:32:12,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:32:12,848 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:32:12,848 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:32:12,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:32:12,849 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:32:12,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:32:12,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:32:12,850 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T08:32:12,851 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:32:12,851 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740 2024-11-24T08:32:12,852 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740 2024-11-24T08:32:12,853 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:32:12,853 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:32:12,853 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T08:32:12,854 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:32:12,855 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750069, jitterRate=-0.04623851180076599}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:32:12,855 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:32:12,856 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732437132845Writing region info on filesystem at 1732437132845Initializing all the Stores at 1732437132846 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132846Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132846Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437132846Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437132846Cleaning up temporary data from old regions at 1732437132853 (+7 ms)Running coprocessor post-open hooks at 1732437132855 (+2 ms)Region opened successfully at 1732437132856 (+1 ms) 2024-11-24T08:32:12,857 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732437132830 2024-11-24T08:32:12,859 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:32:12,859 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:32:12,860 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,861 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,39061,1732437132097, state=OPEN 2024-11-24T08:32:12,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:32:12,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:32:12,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:32:12,865 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,39061,1732437132097 2024-11-24T08:32:12,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:32:12,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:32:12,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,39061,1732437132097 in 188 msec 2024-11-24T08:32:12,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:32:12,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 599 msec 2024-11-24T08:32:12,871 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:32:12,871 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:32:12,872 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:32:12,872 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,39061,1732437132097, seqNum=-1] 2024-11-24T08:32:12,873 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:32:12,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59779, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:32:12,878 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 646 msec 2024-11-24T08:32:12,878 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732437132878, completionTime=-1 2024-11-24T08:32:12,878 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:32:12,878 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732437192880 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437252880 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40953,1732437132049-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40953,1732437132049-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,880 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40953,1732437132049-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,881 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:40953, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:32:12,881 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,881 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:32:12,882 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.757sec 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40953,1732437132049-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:32:12,884 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40953,1732437132049-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:32:12,886 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:32:12,886 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:32:12,886 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,40953,1732437132049-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T08:32:12,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dc4b9aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:32:12,910 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,40953,-1 for getting cluster id 2024-11-24T08:32:12,910 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:32:12,912 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4dcae763-9b21-4e5a-af43-958ab6619c1c' 2024-11-24T08:32:12,912 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:32:12,912 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4dcae763-9b21-4e5a-af43-958ab6619c1c" 2024-11-24T08:32:12,912 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d8850d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:32:12,912 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,40953,-1] 2024-11-24T08:32:12,912 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:32:12,913 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:32:12,913 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56584, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:32:12,914 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@662150e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:32:12,914 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:32:12,915 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,39061,1732437132097, seqNum=-1] 2024-11-24T08:32:12,915 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:32:12,916 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57672, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:32:12,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:32:12,920 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:32:12,920 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T08:32:12,921 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 30c28c82771d,40953,1732437132049 2024-11-24T08:32:12,921 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@539e25ee 2024-11-24T08:32:12,921 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T08:32:12,922 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T08:32:12,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T08:32:12,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T08:32:12,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:32:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-24T08:32:12,926 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T08:32:12,926 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:12,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-24T08:32:12,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:32:12,927 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T08:32:12,934 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741835_1011 (size=381) 2024-11-24T08:32:12,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741835_1011 (size=381) 2024-11-24T08:32:12,936 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 189e68d79a69ab5600f63dfb617fc52c, NAME => 'TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2 2024-11-24T08:32:12,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741836_1012 (size=64) 2024-11-24T08:32:12,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741836_1012 (size=64) 2024-11-24T08:32:12,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:12,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 189e68d79a69ab5600f63dfb617fc52c, disabling compactions & flushes 2024-11-24T08:32:12,943 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:12,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:12,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. after waiting 0 ms 2024-11-24T08:32:12,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:12,943 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 
2024-11-24T08:32:12,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 189e68d79a69ab5600f63dfb617fc52c: Waiting for close lock at 1732437132943Disabling compacts and flushes for region at 1732437132943Disabling writes for close at 1732437132943Writing region close event to WAL at 1732437132943Closed at 1732437132943 2024-11-24T08:32:12,944 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T08:32:12,945 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732437132944"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732437132944"}]},"ts":"1732437132944"} 2024-11-24T08:32:12,947 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T08:32:12,948 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T08:32:12,948 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437132948"}]},"ts":"1732437132948"} 2024-11-24T08:32:12,950 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-24T08:32:12,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, ASSIGN}] 2024-11-24T08:32:12,951 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, ASSIGN 2024-11-24T08:32:12,952 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, ASSIGN; state=OFFLINE, location=30c28c82771d,39061,1732437132097; forceNewPlan=false, retain=false 2024-11-24T08:32:13,103 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=189e68d79a69ab5600f63dfb617fc52c, regionState=OPENING, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:13,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, ASSIGN because future has completed 2024-11-24T08:32:13,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 189e68d79a69ab5600f63dfb617fc52c, 
server=30c28c82771d,39061,1732437132097}] 2024-11-24T08:32:13,262 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:13,262 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 189e68d79a69ab5600f63dfb617fc52c, NAME => 'TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:32:13,263 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,263 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:13,263 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,263 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,264 INFO [StoreOpener-189e68d79a69ab5600f63dfb617fc52c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,265 INFO [StoreOpener-189e68d79a69ab5600f63dfb617fc52c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 189e68d79a69ab5600f63dfb617fc52c columnFamilyName info 2024-11-24T08:32:13,265 DEBUG [StoreOpener-189e68d79a69ab5600f63dfb617fc52c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:13,266 INFO [StoreOpener-189e68d79a69ab5600f63dfb617fc52c-1 {}] regionserver.HStore(327): Store=189e68d79a69ab5600f63dfb617fc52c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:32:13,266 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,266 DEBUG 
[RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,267 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,267 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,267 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,268 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,270 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:32:13,270 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 189e68d79a69ab5600f63dfb617fc52c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790707, jitterRate=0.005437105894088745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:32:13,271 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:13,271 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 189e68d79a69ab5600f63dfb617fc52c: Running coprocessor pre-open hook at 1732437133263Writing region info on filesystem at 1732437133263Initializing all the Stores at 1732437133264 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437133264Cleaning up temporary data from old regions at 1732437133267 (+3 ms)Running coprocessor post-open hooks at 1732437133271 (+4 ms)Region opened successfully at 1732437133271 2024-11-24T08:32:13,272 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., pid=6, masterSystemTime=1732437133258 2024-11-24T08:32:13,274 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:13,274 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:13,275 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=189e68d79a69ab5600f63dfb617fc52c, regionState=OPEN, openSeqNum=2, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:13,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 because future has completed 2024-11-24T08:32:13,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T08:32:13,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 in 172 msec 2024-11-24T08:32:13,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T08:32:13,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, ASSIGN in 330 msec 2024-11-24T08:32:13,284 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T08:32:13,284 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732437133284"}]},"ts":"1732437133284"} 2024-11-24T08:32:13,286 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-24T08:32:13,287 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T08:32:13,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 363 msec 2024-11-24T08:32:13,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:13,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:14,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:14,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:15,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:15,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:15,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:15,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,099 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:32:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:16,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:16,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:17,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:17,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:18,342 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T08:32:18,342 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-24T08:32:18,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:18,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:19,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:19,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:19,522 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T08:32:19,522 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T08:32:19,523 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T08:32:20,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:20,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:21,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:21,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:22,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:22,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:22,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40953 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T08:32:22,987 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-24T08:32:22,987 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-24T08:32:22,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-24T08:32:22,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:22,992 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2] 2024-11-24T08:32:23,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:23,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:32:23,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/3445c8bfa5b14ea28f6f32368c44a4b0 is 1080, key is row0001/info:/1732437142993/Put/seqid=0 2024-11-24T08:32:23,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741837_1013 (size=12509) 2024-11-24T08:32:23,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741837_1013 (size=12509) 2024-11-24T08:32:23,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/3445c8bfa5b14ea28f6f32368c44a4b0 2024-11-24T08:32:23,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/3445c8bfa5b14ea28f6f32368c44a4b0 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/3445c8bfa5b14ea28f6f32368c44a4b0 2024-11-24T08:32:23,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/3445c8bfa5b14ea28f6f32368c44a4b0, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-24T08:32:23,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 189e68d79a69ab5600f63dfb617fc52c in 38ms, sequenceid=11, compaction requested=false 2024-11-24T08:32:23,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 189e68d79a69ab5600f63dfb617fc52c: 2024-11-24T08:32:23,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:23,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-24T08:32:23,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/b00d908acc5d420386df74a9a12ca8da is 1080, key is row0008/info:/1732437143005/Put/seqid=0 2024-11-24T08:32:23,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741838_1014 (size=29761) 2024-11-24T08:32:23,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741838_1014 (size=29761) 2024-11-24T08:32:23,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/b00d908acc5d420386df74a9a12ca8da 2024-11-24T08:32:23,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/b00d908acc5d420386df74a9a12ca8da as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da 2024-11-24T08:32:23,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da, entries=23, sequenceid=37, filesize=29.1 K 2024-11-24T08:32:23,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 189e68d79a69ab5600f63dfb617fc52c in 22ms, sequenceid=37, compaction requested=false 2024-11-24T08:32:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 189e68d79a69ab5600f63dfb617fc52c: 2024-11-24T08:32:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-24T08:32:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:32:23,067 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da because midkey is the same as first or last row 2024-11-24T08:32:23,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:23,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:24,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:24,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:25,025 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:32:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:25,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:32:25,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/eec542f27ebe4cffbe97ed63e090f907 is 1080, key is row0031/info:/1732437143045/Put/seqid=0 2024-11-24T08:32:25,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:25,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741839_1015 (size=12509) 2024-11-24T08:32:25,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741839_1015 (size=12509) 2024-11-24T08:32:25,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-24T08:32:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57672 deadline: 1732437155096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 2024-11-24T08:32:25,121 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:32:25,121 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:32:25,121 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2 because the exception is null or not the one we care about 2024-11-24T08:32:25,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:25,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:25,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/eec542f27ebe4cffbe97ed63e090f907 2024-11-24T08:32:25,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/eec542f27ebe4cffbe97ed63e090f907 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/eec542f27ebe4cffbe97ed63e090f907 2024-11-24T08:32:25,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/eec542f27ebe4cffbe97ed63e090f907, entries=7, sequenceid=47, filesize=12.2 K 2024-11-24T08:32:25,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 189e68d79a69ab5600f63dfb617fc52c in 422ms, sequenceid=47, compaction requested=true 2024-11-24T08:32:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 189e68d79a69ab5600f63dfb617fc52c: 2024-11-24T08:32:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-24T08:32:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:32:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da because midkey is the same as first or last row 2024-11-24T08:32:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 189e68d79a69ab5600f63dfb617fc52c:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:25,481 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:25,482 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:25,482 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): 189e68d79a69ab5600f63dfb617fc52c/info is initiating minor compaction (all files) 2024-11-24T08:32:25,482 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 189e68d79a69ab5600f63dfb617fc52c/info in 
TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:25,482 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/3445c8bfa5b14ea28f6f32368c44a4b0, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/eec542f27ebe4cffbe97ed63e090f907] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp, totalSize=53.5 K 2024-11-24T08:32:25,483 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3445c8bfa5b14ea28f6f32368c44a4b0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732437142993 2024-11-24T08:32:25,483 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting b00d908acc5d420386df74a9a12ca8da, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732437143005 2024-11-24T08:32:25,483 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting eec542f27ebe4cffbe97ed63e090f907, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732437143045 2024-11-24T08:32:25,494 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 189e68d79a69ab5600f63dfb617fc52c#info#compaction#57 average throughput is 37.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:25,495 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/d857c6290c3c4758a62f3402768999d5 is 1080, key is row0001/info:/1732437142993/Put/seqid=0 2024-11-24T08:32:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741840_1016 (size=44978) 2024-11-24T08:32:25,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741840_1016 (size=44978) 2024-11-24T08:32:25,508 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/d857c6290c3c4758a62f3402768999d5 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 2024-11-24T08:32:25,514 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 189e68d79a69ab5600f63dfb617fc52c/info of 189e68d79a69ab5600f63dfb617fc52c into d857c6290c3c4758a62f3402768999d5(size=43.9 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:25,514 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 189e68d79a69ab5600f63dfb617fc52c: 2024-11-24T08:32:25,515 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., storeName=189e68d79a69ab5600f63dfb617fc52c/info, priority=13, startTime=1732437145481; duration=0sec 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 because midkey is the same as first or last row 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 because midkey is the same as first or last row 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 because midkey is the same as first or last row 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:25,515 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 189e68d79a69ab5600f63dfb617fc52c:info 2024-11-24T08:32:26,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:26,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:27,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:27,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:28,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:28,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:29,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:29,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:30,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:30,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:31,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:31,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:32,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:32,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:33,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:33,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:34,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:34,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:35,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 189e68d79a69ab5600f63dfb617fc52c
2024-11-24T08:32:35,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-24T08:32:35,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/a1ff6222cdf649e289a363ac8c9de8b2 is 1080, key is row0038/info:/1732437145059/Put/seqid=0
2024-11-24T08:32:35,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741841_1017 (size=29761)
2024-11-24T08:32:35,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741841_1017 (size=29761)
2024-11-24T08:32:35,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/a1ff6222cdf649e289a363ac8c9de8b2
2024-11-24T08:32:35,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/a1ff6222cdf649e289a363ac8c9de8b2 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/a1ff6222cdf649e289a363ac8c9de8b2
2024-11-24T08:32:35,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/a1ff6222cdf649e289a363ac8c9de8b2, entries=23, sequenceid=74, filesize=29.1 K
2024-11-24T08:32:35,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for 189e68d79a69ab5600f63dfb617fc52c in 21ms, sequenceid=74, compaction requested=false
2024-11-24T08:32:35,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 189e68d79a69ab5600f63dfb617fc52c:
2024-11-24T08:32:35,178 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.0 K, sizeToCheck=16.0 K
2024-11-24T08:32:35,178 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-24T08:32:35,178 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 because midkey is the same as first or last row
2024-11-24T08:32:35,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:35,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:36,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:36,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 189e68d79a69ab5600f63dfb617fc52c
2024-11-24T08:32:37,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-24T08:32:37,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/1a18d14989a44db19c06a3c6b44361dc is 1080, key is row0061/info:/1732437155159/Put/seqid=0
2024-11-24T08:32:37,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741842_1018 (size=12509)
2024-11-24T08:32:37,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741842_1018 (size=12509)
2024-11-24T08:32:37,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/1a18d14989a44db19c06a3c6b44361dc
2024-11-24T08:32:37,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/1a18d14989a44db19c06a3c6b44361dc as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/1a18d14989a44db19c06a3c6b44361dc
2024-11-24T08:32:37,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/1a18d14989a44db19c06a3c6b44361dc, entries=7, sequenceid=84, filesize=12.2 K
2024-11-24T08:32:37,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 189e68d79a69ab5600f63dfb617fc52c in 25ms, sequenceid=84, compaction requested=true
2024-11-24T08:32:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 189e68d79a69ab5600f63dfb617fc52c:
2024-11-24T08:32:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=85.2 K, sizeToCheck=16.0 K
2024-11-24T08:32:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-24T08:32:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 because midkey is the same as first or last row
2024-11-24T08:32:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 189e68d79a69ab5600f63dfb617fc52c:info, priority=-2147483648, current under compaction store size is 1
2024-11-24T08:32:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-24T08:32:37,196 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-24T08:32:37,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 189e68d79a69ab5600f63dfb617fc52c
2024-11-24T08:32:37,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-24T08:32:37,198 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87248 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-24T08:32:37,198 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): 189e68d79a69ab5600f63dfb617fc52c/info is initiating minor compaction (all files)
2024-11-24T08:32:37,198 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 189e68d79a69ab5600f63dfb617fc52c/info in TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.
2024-11-24T08:32:37,198 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/a1ff6222cdf649e289a363ac8c9de8b2, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/1a18d14989a44db19c06a3c6b44361dc] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp, totalSize=85.2 K
2024-11-24T08:32:37,198 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting d857c6290c3c4758a62f3402768999d5, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732437142993
2024-11-24T08:32:37,199 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1ff6222cdf649e289a363ac8c9de8b2, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732437145059
2024-11-24T08:32:37,199 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a18d14989a44db19c06a3c6b44361dc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1732437155159
2024-11-24T08:32:37,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/8c0ff2f455654727b0fb76520d76642f is 1080, key is row0068/info:/1732437157172/Put/seqid=0
2024-11-24T08:32:37,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741843_1019 (size=22222)
2024-11-24T08:32:37,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741843_1019 (size=22222)
2024-11-24T08:32:37,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/8c0ff2f455654727b0fb76520d76642f
2024-11-24T08:32:37,214 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 189e68d79a69ab5600f63dfb617fc52c#info#compaction#61 average throughput is 34.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-24T08:32:37,215 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/c6b28f93e4784ef69bc5662898674aab is 1080, key is row0001/info:/1732437142993/Put/seqid=0
2024-11-24T08:32:37,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/8c0ff2f455654727b0fb76520d76642f as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/8c0ff2f455654727b0fb76520d76642f
2024-11-24T08:32:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741844_1020 (size=77532)
2024-11-24T08:32:37,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741844_1020 (size=77532)
2024-11-24T08:32:37,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/8c0ff2f455654727b0fb76520d76642f, entries=16, sequenceid=103, filesize=21.7 K
2024-11-24T08:32:37,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 189e68d79a69ab5600f63dfb617fc52c in 26ms, sequenceid=103, compaction requested=false
2024-11-24T08:32:37,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 189e68d79a69ab5600f63dfb617fc52c:
2024-11-24T08:32:37,223 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.9 K, sizeToCheck=16.0 K
2024-11-24T08:32:37,223 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-24T08:32:37,223 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 because midkey is the same as first or last row
2024-11-24T08:32:37,226 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/c6b28f93e4784ef69bc5662898674aab as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab
2024-11-24T08:32:37,231 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 189e68d79a69ab5600f63dfb617fc52c/info of 189e68d79a69ab5600f63dfb617fc52c into c6b28f93e4784ef69bc5662898674aab(size=75.7 K), total size for store is 97.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-24T08:32:37,231 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 189e68d79a69ab5600f63dfb617fc52c:
2024-11-24T08:32:37,232 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., storeName=189e68d79a69ab5600f63dfb617fc52c/info, priority=13, startTime=1732437157196; duration=0sec
2024-11-24T08:32:37,232 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=97.4 K, sizeToCheck=16.0 K
2024-11-24T08:32:37,232 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-24T08:32:37,232 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=97.4 K, sizeToCheck=16.0 K
2024-11-24T08:32:37,232 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-24T08:32:37,232 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=97.4 K, sizeToCheck=16.0 K
2024-11-24T08:32:37,232 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-24T08:32:37,233 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-24T08:32:37,233 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-24T08:32:37,233 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 189e68d79a69ab5600f63dfb617fc52c:info
2024-11-24T08:32:37,234 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40953 {}] assignment.AssignmentManager(1363): Split request from 30c28c82771d,39061,1732437132097, parent={ENCODED => 189e68d79a69ab5600f63dfb617fc52c, NAME => 'TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-11-24T08:32:37,240 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40953 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=30c28c82771d,39061,1732437132097
2024-11-24T08:32:37,245 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40953 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=189e68d79a69ab5600f63dfb617fc52c, daughterA=c20b5290c32295c6ca6d4936601ba4d2, daughterB=fe9ce968c7bdd01663e70b82d2d6d505
2024-11-24T08:32:37,246 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=189e68d79a69ab5600f63dfb617fc52c, daughterA=c20b5290c32295c6ca6d4936601ba4d2, daughterB=fe9ce968c7bdd01663e70b82d2d6d505
2024-11-24T08:32:37,246 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=189e68d79a69ab5600f63dfb617fc52c, daughterA=c20b5290c32295c6ca6d4936601ba4d2, daughterB=fe9ce968c7bdd01663e70b82d2d6d505
2024-11-24T08:32:37,246 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=189e68d79a69ab5600f63dfb617fc52c, daughterA=c20b5290c32295c6ca6d4936601ba4d2, daughterB=fe9ce968c7bdd01663e70b82d2d6d505
2024-11-24T08:32:37,253 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, UNASSIGN}]
2024-11-24T08:32:37,254 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, UNASSIGN
2024-11-24T08:32:37,256 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=189e68d79a69ab5600f63dfb617fc52c, regionState=CLOSING, regionLocation=30c28c82771d,39061,1732437132097
2024-11-24T08:32:37,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, UNASSIGN because future has completed
2024-11-24T08:32:37,259 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-11-24T08:32:37,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097}]
2024-11-24T08:32:37,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:37,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T08:32:37,417 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 189e68d79a69ab5600f63dfb617fc52c
2024-11-24T08:32:37,417 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-11-24T08:32:37,418 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 189e68d79a69ab5600f63dfb617fc52c, disabling compactions & flushes
2024-11-24T08:32:37,418 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.
2024-11-24T08:32:37,418 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.
2024-11-24T08:32:37,418 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. after waiting 0 ms
2024-11-24T08:32:37,418 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.
2024-11-24T08:32:37,418 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 189e68d79a69ab5600f63dfb617fc52c 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T08:32:37,422 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/cf8c5ea67e134858bcdc6d056cadd1e7 is 1080, key is row0084/info:/1732437157198/Put/seqid=0 2024-11-24T08:32:37,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741845_1021 (size=18987) 2024-11-24T08:32:37,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741845_1021 (size=18987) 2024-11-24T08:32:37,428 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/cf8c5ea67e134858bcdc6d056cadd1e7 2024-11-24T08:32:37,434 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/.tmp/info/cf8c5ea67e134858bcdc6d056cadd1e7 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/cf8c5ea67e134858bcdc6d056cadd1e7 2024-11-24T08:32:37,438 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/cf8c5ea67e134858bcdc6d056cadd1e7, entries=13, sequenceid=120, filesize=18.5 K 2024-11-24T08:32:37,439 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 189e68d79a69ab5600f63dfb617fc52c in 21ms, sequenceid=120, compaction requested=true 2024-11-24T08:32:37,440 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/3445c8bfa5b14ea28f6f32368c44a4b0, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5, 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/eec542f27ebe4cffbe97ed63e090f907, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/a1ff6222cdf649e289a363ac8c9de8b2, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/1a18d14989a44db19c06a3c6b44361dc] to archive 2024-11-24T08:32:37,441 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T08:32:37,443 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/3445c8bfa5b14ea28f6f32368c44a4b0 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/3445c8bfa5b14ea28f6f32368c44a4b0 2024-11-24T08:32:37,444 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/b00d908acc5d420386df74a9a12ca8da 2024-11-24T08:32:37,445 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/d857c6290c3c4758a62f3402768999d5 2024-11-24T08:32:37,446 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/eec542f27ebe4cffbe97ed63e090f907 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/eec542f27ebe4cffbe97ed63e090f907 2024-11-24T08:32:37,447 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/a1ff6222cdf649e289a363ac8c9de8b2 to 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/a1ff6222cdf649e289a363ac8c9de8b2 2024-11-24T08:32:37,448 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/1a18d14989a44db19c06a3c6b44361dc to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/1a18d14989a44db19c06a3c6b44361dc 2024-11-24T08:32:37,453 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=1 2024-11-24T08:32:37,454 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 2024-11-24T08:32:37,454 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 189e68d79a69ab5600f63dfb617fc52c: Waiting for close lock at 1732437157418Running coprocessor pre-close hooks at 1732437157418Disabling compacts and flushes for region at 1732437157418Disabling writes for close at 1732437157418Obtaining lock to block concurrent updates at 1732437157418Preparing flush snapshotting stores in 189e68d79a69ab5600f63dfb617fc52c at 1732437157418Finished memstore snapshotting TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., syncing WAL and waiting on mvcc, flushsize=dataSize=13988, getHeapSize=15216, getOffHeapSize=0, getCellsCount=13 at 1732437157418Flushing stores of TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 
at 1732437157419 (+1 ms)Flushing 189e68d79a69ab5600f63dfb617fc52c/info: creating writer at 1732437157419Flushing 189e68d79a69ab5600f63dfb617fc52c/info: appending metadata at 1732437157422 (+3 ms)Flushing 189e68d79a69ab5600f63dfb617fc52c/info: closing flushed file at 1732437157422Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b127522: reopening flushed file at 1732437157433 (+11 ms)Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 189e68d79a69ab5600f63dfb617fc52c in 21ms, sequenceid=120, compaction requested=true at 1732437157439 (+6 ms)Writing region close event to WAL at 1732437157450 (+11 ms)Running coprocessor post-close hooks at 1732437157454 (+4 ms)Closed at 1732437157454 2024-11-24T08:32:37,456 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,457 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=189e68d79a69ab5600f63dfb617fc52c, regionState=CLOSED 2024-11-24T08:32:37,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 because future has completed 2024-11-24T08:32:37,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-24T08:32:37,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 189e68d79a69ab5600f63dfb617fc52c, server=30c28c82771d,39061,1732437132097 in 201 msec 2024-11-24T08:32:37,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T08:32:37,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=189e68d79a69ab5600f63dfb617fc52c, UNASSIGN in 209 msec 2024-11-24T08:32:37,471 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:37,474 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=189e68d79a69ab5600f63dfb617fc52c, threads=3 2024-11-24T08:32:37,476 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/8c0ff2f455654727b0fb76520d76642f for region: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,476 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab for region: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,476 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/cf8c5ea67e134858bcdc6d056cadd1e7 for region: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,485 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/8c0ff2f455654727b0fb76520d76642f, top=true 2024-11-24T08:32:37,485 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/cf8c5ea67e134858bcdc6d056cadd1e7, top=true 2024-11-24T08:32:37,490 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f for child: fe9ce968c7bdd01663e70b82d2d6d505, parent: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,490 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/8c0ff2f455654727b0fb76520d76642f for region: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741846_1022 (size=27) 2024-11-24T08:32:37,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741846_1022 (size=27) 2024-11-24T08:32:37,492 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7 for child: fe9ce968c7bdd01663e70b82d2d6d505, parent: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,492 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/cf8c5ea67e134858bcdc6d056cadd1e7 for region: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741847_1023 (size=27) 2024-11-24T08:32:37,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741847_1023 (size=27) 2024-11-24T08:32:37,500 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab for region: 189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:32:37,502 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 189e68d79a69ab5600f63dfb617fc52c Daughter A: [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c] storefiles, Daughter B: [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7] storefiles. 2024-11-24T08:32:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741848_1024 (size=71) 2024-11-24T08:32:37,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741848_1024 (size=71) 2024-11-24T08:32:37,511 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:37,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741849_1025 (size=71) 2024-11-24T08:32:37,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741849_1025 (size=71) 2024-11-24T08:32:37,524 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:37,535 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-11-24T08:32:37,537 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-11-24T08:32:37,540 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732437157539"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732437157539"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732437157539"}]},"ts":"1732437157539"} 2024-11-24T08:32:37,540 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732437157539"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732437157539"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732437157539"}]},"ts":"1732437157539"} 2024-11-24T08:32:37,540 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732437157539"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732437157539"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732437157539"}]},"ts":"1732437157539"} 2024-11-24T08:32:37,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c20b5290c32295c6ca6d4936601ba4d2, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fe9ce968c7bdd01663e70b82d2d6d505, ASSIGN}] 2024-11-24T08:32:37,564 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c20b5290c32295c6ca6d4936601ba4d2, ASSIGN 2024-11-24T08:32:37,564 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fe9ce968c7bdd01663e70b82d2d6d505, ASSIGN 2024-11-24T08:32:37,565 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c20b5290c32295c6ca6d4936601ba4d2, ASSIGN; state=SPLITTING_NEW, location=30c28c82771d,39061,1732437132097; forceNewPlan=false, retain=false 2024-11-24T08:32:37,565 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fe9ce968c7bdd01663e70b82d2d6d505, ASSIGN; state=SPLITTING_NEW, location=30c28c82771d,39061,1732437132097; forceNewPlan=false, retain=false 2024-11-24T08:32:37,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=c20b5290c32295c6ca6d4936601ba4d2, regionState=OPENING, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:37,715 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=fe9ce968c7bdd01663e70b82d2d6d505, regionState=OPENING, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:37,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c20b5290c32295c6ca6d4936601ba4d2, ASSIGN because future has completed 2024-11-24T08:32:37,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure c20b5290c32295c6ca6d4936601ba4d2, server=30c28c82771d,39061,1732437132097}] 2024-11-24T08:32:37,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fe9ce968c7bdd01663e70b82d2d6d505, ASSIGN because future has completed 2024-11-24T08:32:37,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097}] 2024-11-24T08:32:37,874 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:32:37,874 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => c20b5290c32295c6ca6d4936601ba4d2, NAME => 'TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-24T08:32:37,874 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,874 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:37,875 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,875 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,876 INFO [StoreOpener-c20b5290c32295c6ca6d4936601ba4d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,877 INFO [StoreOpener-c20b5290c32295c6ca6d4936601ba4d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c20b5290c32295c6ca6d4936601ba4d2 columnFamilyName info 2024-11-24T08:32:37,877 DEBUG [StoreOpener-c20b5290c32295c6ca6d4936601ba4d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:37,887 DEBUG [StoreOpener-c20b5290c32295c6ca6d4936601ba4d2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c->hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab-bottom 2024-11-24T08:32:37,887 INFO [StoreOpener-c20b5290c32295c6ca6d4936601ba4d2-1 {}] regionserver.HStore(327): Store=c20b5290c32295c6ca6d4936601ba4d2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:32:37,887 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,888 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,889 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,890 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,890 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,891 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,892 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened c20b5290c32295c6ca6d4936601ba4d2; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769024, jitterRate=-0.022135764360427856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
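[Editorial sketch, not part of this log] The entries above show daughter region c20b5290c32295c6ca6d4936601ba4d2 opening after the split of 189e68d79a69ab5600f63dfb617fc52c. Purely as an illustration of how such a split can be requested from a client (the table name and the split key row0062 are taken from this log; the connection setup is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to split the table at row0062; the master then runs a
          // SplitTableRegionProcedure (like pid=7 above) and assigns the daughters.
          admin.split(TableName.valueOf("TestLogRolling-testLogRolling"),
                      Bytes.toBytes("row0062"));
        }
      }
    }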
2024-11-24T08:32:37,892 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:32:37,892 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for c20b5290c32295c6ca6d4936601ba4d2: Running coprocessor pre-open hook at 1732437157875Writing region info on filesystem at 1732437157875Initializing all the Stores at 1732437157875Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437157875Cleaning up temporary data from old regions at 1732437157890 (+15 ms)Running coprocessor post-open hooks at 1732437157892 (+2 ms)Region opened successfully at 1732437157892 2024-11-24T08:32:37,893 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2., pid=12, masterSystemTime=1732437157870 2024-11-24T08:32:37,893 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store c20b5290c32295c6ca6d4936601ba4d2:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:37,893 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:37,893 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T08:32:37,894 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:32:37,894 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): c20b5290c32295c6ca6d4936601ba4d2/info is initiating minor compaction (all files) 2024-11-24T08:32:37,894 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c20b5290c32295c6ca6d4936601ba4d2/info in TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 
2024-11-24T08:32:37,894 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c->hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab-bottom] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/.tmp, totalSize=75.7 K 2024-11-24T08:32:37,895 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1732437142993 2024-11-24T08:32:37,895 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:32:37,895 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:32:37,895 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 
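[Editorial sketch, not part of this log] Once both daughters are open, a client can list the table's regions to confirm the new boundaries; a minimal sketch (connection details assumed, output format ours):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListDaughters {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Print each region's encoded name and key range, e.g. ['', row0062) and [row0062, '')
          for (RegionInfo r : admin.getRegions(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            System.out.println(r.getEncodedName() + " ["
                + Bytes.toStringBinary(r.getStartKey()) + ", "
                + Bytes.toStringBinary(r.getEndKey()) + ")");
          }
        }
      }
    }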
2024-11-24T08:32:37,896 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => fe9ce968c7bdd01663e70b82d2d6d505, NAME => 'TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-24T08:32:37,896 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,896 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:32:37,896 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,896 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,896 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=c20b5290c32295c6ca6d4936601ba4d2, regionState=OPEN, openSeqNum=124, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:37,897 INFO [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,898 INFO [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe9ce968c7bdd01663e70b82d2d6d505 columnFamilyName info 2024-11-24T08:32:37,898 DEBUG [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:32:37,898 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-24T08:32:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
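[Editorial sketch, not part of this log] The flush of region 1588230740 (hbase:meta) above is triggered internally by the region server; for reference, an explicit flush can also be requested from a client. Illustrative only, connection details assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.META_TABLE_NAME);                            // flush hbase:meta
          admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));   // flush the test table
        }
      }
    }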
2024-11-24T08:32:37,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-24T08:32:37,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure c20b5290c32295c6ca6d4936601ba4d2, server=30c28c82771d,39061,1732437132097 because future has completed 2024-11-24T08:32:37,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-24T08:32:37,903 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure c20b5290c32295c6ca6d4936601ba4d2, server=30c28c82771d,39061,1732437132097 in 182 msec 2024-11-24T08:32:37,904 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c20b5290c32295c6ca6d4936601ba4d2, ASSIGN in 341 msec 2024-11-24T08:32:37,910 DEBUG [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f 2024-11-24T08:32:37,915 DEBUG [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7 2024-11-24T08:32:37,917 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c20b5290c32295c6ca6d4936601ba4d2#info#compaction#63 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:37,917 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/.tmp/info/b3081fb6895a434abc2c5a9d60201ac8 is 1080, key is row0001/info:/1732437142993/Put/seqid=0 2024-11-24T08:32:37,920 DEBUG [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c->hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab-top 2024-11-24T08:32:37,921 INFO [StoreOpener-fe9ce968c7bdd01663e70b82d2d6d505-1 {}] regionserver.HStore(327): Store=fe9ce968c7bdd01663e70b82d2d6d505/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:32:37,921 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,921 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,923 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,923 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,923 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/info/39537f758f2748d7a8c9dbd5badc63cc is 193, key is TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505./info:regioninfo/1732437157715/Put/seqid=0 2024-11-24T08:32:37,925 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,926 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened fe9ce968c7bdd01663e70b82d2d6d505; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817347, 
jitterRate=0.039311230182647705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T08:32:37,926 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:37,926 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for fe9ce968c7bdd01663e70b82d2d6d505: Running coprocessor pre-open hook at 1732437157896Writing region info on filesystem at 1732437157896Initializing all the Stores at 1732437157897 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437157897Cleaning up temporary data from old regions at 1732437157923 (+26 ms)Running coprocessor post-open hooks at 1732437157926 (+3 ms)Region opened successfully at 1732437157926 2024-11-24T08:32:37,927 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., pid=13, masterSystemTime=1732437157870 2024-11-24T08:32:37,927 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 2 2024-11-24T08:32:37,927 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:37,927 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:37,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741850_1026 (size=70862) 2024-11-24T08:32:37,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741850_1026 (size=70862) 2024-11-24T08:32:37,930 INFO [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:37,930 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HStore(1541): fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:37,930 INFO [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 
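[Editorial sketch, not part of this log] Cells such as "row0062/info:/…/Put" seen above were written by the test client. As an illustration of an equivalent write (the row key and column family come from the log; the value, empty qualifier, and connection setup are assumptions):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row0062"));
          // Empty qualifier in the 'info' family, matching the cell names in this log.
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value"));
          table.put(put);
        }
      }
    }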
2024-11-24T08:32:37,930 INFO [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c->hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab-top, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=116.0 K 2024-11-24T08:32:37,931 DEBUG [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:37,931 INFO [RS_OPEN_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 
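[Editorial sketch, not part of this log] The compactions above are system-requested because the daughter regions were just opened from reference files. For comparison, a client can also ask for compaction explicitly; a minimal sketch (connection details assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);        // queue a minor compaction
          admin.majorCompact(table);   // queue a major compaction (rewrites all store files)
        }
      }
    }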
2024-11-24T08:32:37,931 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] compactions.Compactor(225): Compacting c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732437142993 2024-11-24T08:32:37,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741851_1027 (size=9847) 2024-11-24T08:32:37,932 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=fe9ce968c7bdd01663e70b82d2d6d505, regionState=OPEN, openSeqNum=124, regionLocation=30c28c82771d,39061,1732437132097 2024-11-24T08:32:37,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741851_1027 (size=9847) 2024-11-24T08:32:37,932 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732437157172 2024-11-24T08:32:37,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/info/39537f758f2748d7a8c9dbd5badc63cc 2024-11-24T08:32:37,932 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732437157198 2024-11-24T08:32:37,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097 because future has completed 2024-11-24T08:32:37,938 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/.tmp/info/b3081fb6895a434abc2c5a9d60201ac8 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/b3081fb6895a434abc2c5a9d60201ac8 2024-11-24T08:32:37,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-24T08:32:37,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097 in 217 msec 2024-11-24T08:32:37,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-24T08:32:37,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fe9ce968c7bdd01663e70b82d2d6d505, ASSIGN in 378 msec 2024-11-24T08:32:37,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=189e68d79a69ab5600f63dfb617fc52c, daughterA=c20b5290c32295c6ca6d4936601ba4d2, daughterB=fe9ce968c7bdd01663e70b82d2d6d505 in 703 msec 2024-11-24T08:32:37,945 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in c20b5290c32295c6ca6d4936601ba4d2/info of c20b5290c32295c6ca6d4936601ba4d2 into b3081fb6895a434abc2c5a9d60201ac8(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:37,946 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c20b5290c32295c6ca6d4936601ba4d2: 2024-11-24T08:32:37,946 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2., storeName=c20b5290c32295c6ca6d4936601ba4d2/info, priority=15, startTime=1732437157893; duration=0sec 2024-11-24T08:32:37,946 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:37,946 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c20b5290c32295c6ca6d4936601ba4d2:info 2024-11-24T08:32:37,958 INFO [RS:0;30c28c82771d:39061-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#65 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:37,959 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/9ff1e5546a1445de8575e01c63062ded is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:37,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741852_1028 (size=42984) 2024-11-24T08:32:37,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741852_1028 (size=42984) 2024-11-24T08:32:37,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/ns/27c7753d04e448e7a95c1237a1578fce is 43, key is default/ns:d/1732437132874/Put/seqid=0 2024-11-24T08:32:37,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741853_1029 (size=5153) 2024-11-24T08:32:37,971 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/9ff1e5546a1445de8575e01c63062ded as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9ff1e5546a1445de8575e01c63062ded 2024-11-24T08:32:37,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741853_1029 (size=5153) 2024-11-24T08:32:37,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/ns/27c7753d04e448e7a95c1237a1578fce 2024-11-24T08:32:37,977 INFO [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into 9ff1e5546a1445de8575e01c63062ded(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
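[Editorial sketch, not part of this log] The repeated Close-WAL-Writer-0 warnings that follow reference WALs under hdfs://localhost:45383 (a different mini-cluster than the one used above) and fail with "java.io.IOException: Filesystem closed", i.e. the DFSClient behind that FileSystem has already been shut down, so every isFileClosed probe throws and the once-per-second retry keeps logging. The pattern RecoverLeaseFSUtils retries looks roughly like the sketch below against a plain DistributedFileSystem (file system handle, path, and sleep interval are assumptions):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Ask the NameNode to recover the writer's lease, then poll until the file is closed.
      public static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && !dfs.isFileClosed(wal)) {
          Thread.sleep(1000L);                 // back off before retrying
          recovered = dfs.recoverLease(wal);
        }
      }
    }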
2024-11-24T08:32:37,977 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:37,977 INFO [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437157927; duration=0sec 2024-11-24T08:32:37,977 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:37,977 DEBUG [RS:0;30c28c82771d:39061-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:37,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/table/87d8ef336e6743a3baa262e82566ea09 is 65, key is TestLogRolling-testLogRolling/table:state/1732437133284/Put/seqid=0 2024-11-24T08:32:37,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741854_1030 (size=5340) 2024-11-24T08:32:37,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741854_1030 (size=5340) 2024-11-24T08:32:37,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/table/87d8ef336e6743a3baa262e82566ea09 2024-11-24T08:32:38,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/info/39537f758f2748d7a8c9dbd5badc63cc as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/info/39537f758f2748d7a8c9dbd5badc63cc 2024-11-24T08:32:38,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/info/39537f758f2748d7a8c9dbd5badc63cc, entries=30, sequenceid=17, filesize=9.6 K 2024-11-24T08:32:38,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/ns/27c7753d04e448e7a95c1237a1578fce as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/ns/27c7753d04e448e7a95c1237a1578fce 2024-11-24T08:32:38,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/ns/27c7753d04e448e7a95c1237a1578fce, entries=2, sequenceid=17, filesize=5.0 K 2024-11-24T08:32:38,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/table/87d8ef336e6743a3baa262e82566ea09 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/table/87d8ef336e6743a3baa262e82566ea09 2024-11-24T08:32:38,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/table/87d8ef336e6743a3baa262e82566ea09, entries=2, sequenceid=17, filesize=5.2 K 2024-11-24T08:32:38,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 119ms, sequenceid=17, compaction requested=false 2024-11-24T08:32:38,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T08:32:38,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:38,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:39,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57672 deadline: 1732437169223, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. is not online on 30c28c82771d,39061,1732437132097 2024-11-24T08:32:39,225 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. 
is not online on 30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:32:39,225 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c. is not online on 30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:32:39,225 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732437132923.189e68d79a69ab5600f63dfb617fc52c., hostname=30c28c82771d,39061,1732437132097, seqNum=2 from cache 2024-11-24T08:32:39,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:39,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:40,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:40,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:41,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:41,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:42,033 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T08:32:42,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:42,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:42,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,985 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T08:32:42,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:42,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T08:32:43,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:43,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:44,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:44,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:45,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:45,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:46,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:46,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:47,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:47,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:48,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:48,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:49,261 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., hostname=30c28c82771d,39061,1732437132097, seqNum=124] 2024-11-24T08:32:49,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:49,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:32:49,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/c45133530fd844d09b7572b84f42bb13 is 1080, key is row0097/info:/1732437169262/Put/seqid=0 2024-11-24T08:32:49,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741855_1031 (size=12515) 2024-11-24T08:32:49,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741855_1031 (size=12515) 2024-11-24T08:32:49,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/c45133530fd844d09b7572b84f42bb13 2024-11-24T08:32:49,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/c45133530fd844d09b7572b84f42bb13 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c45133530fd844d09b7572b84f42bb13 2024-11-24T08:32:49,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c45133530fd844d09b7572b84f42bb13, entries=7, sequenceid=134, filesize=12.2 K 2024-11-24T08:32:49,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for fe9ce968c7bdd01663e70b82d2d6d505 in 22ms, sequenceid=134, compaction requested=false 2024-11-24T08:32:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:49,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:49,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T08:32:49,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/9fcab5316bf743c9a2c671992d8358b7 is 1080, key is row0104/info:/1732437169273/Put/seqid=0 2024-11-24T08:32:49,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741856_1032 (size=21156) 2024-11-24T08:32:49,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741856_1032 (size=21156) 2024-11-24T08:32:49,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/9fcab5316bf743c9a2c671992d8358b7 2024-11-24T08:32:49,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/9fcab5316bf743c9a2c671992d8358b7 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9fcab5316bf743c9a2c671992d8358b7 2024-11-24T08:32:49,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9fcab5316bf743c9a2c671992d8358b7, entries=15, sequenceid=152, filesize=20.7 K 2024-11-24T08:32:49,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for fe9ce968c7bdd01663e70b82d2d6d505 in 23ms, sequenceid=152, compaction requested=true 2024-11-24T08:32:49,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:49,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 1 
2024-11-24T08:32:49,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:49,318 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:49,319 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76655 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:49,319 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:49,319 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:49,320 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9ff1e5546a1445de8575e01c63062ded, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c45133530fd844d09b7572b84f42bb13, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9fcab5316bf743c9a2c671992d8358b7] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=74.9 K 2024-11-24T08:32:49,320 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ff1e5546a1445de8575e01c63062ded, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732437155161 2024-11-24T08:32:49,320 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting c45133530fd844d09b7572b84f42bb13, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732437169262 2024-11-24T08:32:49,321 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9fcab5316bf743c9a2c671992d8358b7, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732437169273 2024-11-24T08:32:49,330 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#70 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:49,331 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/efe3e997f1434a2f81f53458b510cf6c is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:49,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741857_1033 (size=66869) 2024-11-24T08:32:49,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741857_1033 (size=66869) 2024-11-24T08:32:49,361 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/efe3e997f1434a2f81f53458b510cf6c as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/efe3e997f1434a2f81f53458b510cf6c 2024-11-24T08:32:49,366 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into efe3e997f1434a2f81f53458b510cf6c(size=65.3 K), total size for store is 65.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:49,366 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:49,366 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437169318; duration=0sec 2024-11-24T08:32:49,366 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:49,367 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:49,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:49,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:50,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:50,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:51,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:51,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T08:32:51,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c is 1080, key is row0119/info:/1732437169296/Put/seqid=0 2024-11-24T08:32:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741858_1034 (size=16828) 2024-11-24T08:32:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741858_1034 (size=16828) 2024-11-24T08:32:51,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c 2024-11-24T08:32:51,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c as 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c 2024-11-24T08:32:51,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c, entries=11, sequenceid=167, filesize=16.4 K 2024-11-24T08:32:51,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for fe9ce968c7bdd01663e70b82d2d6d505 in 21ms, sequenceid=167, compaction requested=false 2024-11-24T08:32:51,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:51,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:51,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T08:32:51,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/1b25d67f43f94afab1dad1067f3c5124 is 1080, key is row0130/info:/1732437171314/Put/seqid=0 2024-11-24T08:32:51,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741859_1035 (size=21156) 2024-11-24T08:32:51,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741859_1035 (size=21156) 2024-11-24T08:32:51,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/1b25d67f43f94afab1dad1067f3c5124 2024-11-24T08:32:51,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/1b25d67f43f94afab1dad1067f3c5124 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1b25d67f43f94afab1dad1067f3c5124 2024-11-24T08:32:51,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1b25d67f43f94afab1dad1067f3c5124, entries=15, sequenceid=185, filesize=20.7 K 2024-11-24T08:32:51,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for fe9ce968c7bdd01663e70b82d2d6d505 in 19ms, sequenceid=185, compaction requested=true 2024-11-24T08:32:51,354 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:51,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:51,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:51,355 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:51,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:51,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T08:32:51,356 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104853 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:51,356 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:51,356 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:51,356 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/efe3e997f1434a2f81f53458b510cf6c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1b25d67f43f94afab1dad1067f3c5124] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=102.4 K 2024-11-24T08:32:51,356 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting efe3e997f1434a2f81f53458b510cf6c, keycount=57, bloomtype=ROW, size=65.3 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732437155161 2024-11-24T08:32:51,357 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a0b7ec6f3fd4e00a6a108a24fb71a4c, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732437169296 2024-11-24T08:32:51,357 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b25d67f43f94afab1dad1067f3c5124, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732437171314 2024-11-24T08:32:51,359 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/4c5138218de2412c8c0d9f3e77d82781 is 1080, key is row0145/info:/1732437171336/Put/seqid=0 2024-11-24T08:32:51,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741860_1036 (size=20078) 2024-11-24T08:32:51,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741860_1036 (size=20078) 2024-11-24T08:32:51,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/4c5138218de2412c8c0d9f3e77d82781 2024-11-24T08:32:51,368 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#74 average throughput is 85.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:51,369 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/6afbac8ce9f947adb0a4920147c3f599 is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:51,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/4c5138218de2412c8c0d9f3e77d82781 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4c5138218de2412c8c0d9f3e77d82781 2024-11-24T08:32:51,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4c5138218de2412c8c0d9f3e77d82781, entries=14, sequenceid=202, filesize=19.6 K 2024-11-24T08:32:51,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741861_1037 (size=95076) 2024-11-24T08:32:51,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741861_1037 (size=95076) 2024-11-24T08:32:51,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for fe9ce968c7bdd01663e70b82d2d6d505 in 22ms, sequenceid=202, compaction requested=false 2024-11-24T08:32:51,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:51,381 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/6afbac8ce9f947adb0a4920147c3f599 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/6afbac8ce9f947adb0a4920147c3f599 2024-11-24T08:32:51,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:51,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:51,387 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into 6afbac8ce9f947adb0a4920147c3f599(size=92.8 K), total size for store is 112.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:51,387 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:51,387 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437171355; duration=0sec 2024-11-24T08:32:51,387 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:51,387 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:52,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:52,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:53,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:32:53,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/e0f3c463070c4d2dbd316e3109c3b11a is 1080, key is row0159/info:/1732437171356/Put/seqid=0 2024-11-24T08:32:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741862_1038 (size=12516) 2024-11-24T08:32:53,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741862_1038 (size=12516) 2024-11-24T08:32:53,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/e0f3c463070c4d2dbd316e3109c3b11a 2024-11-24T08:32:53,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/e0f3c463070c4d2dbd316e3109c3b11a as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/e0f3c463070c4d2dbd316e3109c3b11a 2024-11-24T08:32:53,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:53,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:53,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/e0f3c463070c4d2dbd316e3109c3b11a, entries=7, sequenceid=213, filesize=12.2 K 2024-11-24T08:32:53,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for fe9ce968c7bdd01663e70b82d2d6d505 in 22ms, sequenceid=213, compaction requested=true 2024-11-24T08:32:53,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:53,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:53,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:53,388 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:53,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T08:32:53,389 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127670 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:53,389 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:53,389 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 
2024-11-24T08:32:53,389 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/6afbac8ce9f947adb0a4920147c3f599, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4c5138218de2412c8c0d9f3e77d82781, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/e0f3c463070c4d2dbd316e3109c3b11a] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=124.7 K 2024-11-24T08:32:53,390 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6afbac8ce9f947adb0a4920147c3f599, keycount=83, bloomtype=ROW, size=92.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732437155161 2024-11-24T08:32:53,390 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4c5138218de2412c8c0d9f3e77d82781, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732437171336 2024-11-24T08:32:53,391 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting e0f3c463070c4d2dbd316e3109c3b11a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732437171356 2024-11-24T08:32:53,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/66b9ec325d1140fd906e5791d622a3fa is 1080, key is row0166/info:/1732437173367/Put/seqid=0 2024-11-24T08:32:53,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741863_1039 (size=20078) 2024-11-24T08:32:53,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741863_1039 (size=20078) 2024-11-24T08:32:53,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/66b9ec325d1140fd906e5791d622a3fa 2024-11-24T08:32:53,410 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#77 average throughput is 53.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:53,411 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/82e0fb13bc504151a0af2bcc9d714b8c is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:53,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/66b9ec325d1140fd906e5791d622a3fa as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/66b9ec325d1140fd906e5791d622a3fa 2024-11-24T08:32:53,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741864_1040 (size=117820) 2024-11-24T08:32:53,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741864_1040 (size=117820) 2024-11-24T08:32:53,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/66b9ec325d1140fd906e5791d622a3fa, entries=14, sequenceid=230, filesize=19.6 K 2024-11-24T08:32:53,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for fe9ce968c7bdd01663e70b82d2d6d505 in 30ms, sequenceid=230, compaction requested=false 2024-11-24T08:32:53,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:53,419 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/82e0fb13bc504151a0af2bcc9d714b8c as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/82e0fb13bc504151a0af2bcc9d714b8c 2024-11-24T08:32:53,424 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into 82e0fb13bc504151a0af2bcc9d714b8c(size=115.1 K), total size for store is 134.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T08:32:53,424 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:53,424 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437173388; duration=0sec 2024-11-24T08:32:53,424 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:53,424 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:53,853 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T08:32:53,853 INFO [master/30c28c82771d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T08:32:54,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:54,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:55,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:55,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:32:55,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:55,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T08:32:55,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/41e25b4901e34d779876be792f83edc4 is 1080, key is row0180/info:/1732437173390/Put/seqid=0 2024-11-24T08:32:55,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741865_1041 (size=20078) 2024-11-24T08:32:55,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741865_1041 (size=20078) 2024-11-24T08:32:55,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/41e25b4901e34d779876be792f83edc4 2024-11-24T08:32:55,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/41e25b4901e34d779876be792f83edc4 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/41e25b4901e34d779876be792f83edc4 2024-11-24T08:32:55,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/41e25b4901e34d779876be792f83edc4, entries=14, sequenceid=248, filesize=19.6 K 2024-11-24T08:32:55,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for fe9ce968c7bdd01663e70b82d2d6d505 in 21ms, sequenceid=248, compaction requested=true 2024-11-24T08:32:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:55,435 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:55,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:55,436 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T08:32:55,436 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157976 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:55,436 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:55,436 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:55,436 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/82e0fb13bc504151a0af2bcc9d714b8c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/66b9ec325d1140fd906e5791d622a3fa, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/41e25b4901e34d779876be792f83edc4] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=154.3 K 2024-11-24T08:32:55,437 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82e0fb13bc504151a0af2bcc9d714b8c, keycount=104, bloomtype=ROW, size=115.1 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732437155161 2024-11-24T08:32:55,437 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66b9ec325d1140fd906e5791d622a3fa, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732437173367 2024-11-24T08:32:55,437 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 41e25b4901e34d779876be792f83edc4, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732437173390 2024-11-24T08:32:55,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/f65c019597484ca9be269fc3dd669b4f is 1080, key is row0194/info:/1732437175416/Put/seqid=0 2024-11-24T08:32:55,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741866_1042 (size=19007) 2024-11-24T08:32:55,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741866_1042 (size=19007) 2024-11-24T08:32:55,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=264 (bloomFilter=true), 
to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/f65c019597484ca9be269fc3dd669b4f 2024-11-24T08:32:55,450 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#80 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:55,451 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/68a49b406de0469e8937541cf9999626 is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:55,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/f65c019597484ca9be269fc3dd669b4f as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/f65c019597484ca9be269fc3dd669b4f 2024-11-24T08:32:55,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741867_1043 (size=148311) 2024-11-24T08:32:55,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741867_1043 (size=148311) 2024-11-24T08:32:55,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/f65c019597484ca9be269fc3dd669b4f, entries=13, sequenceid=264, filesize=18.6 K 2024-11-24T08:32:55,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for fe9ce968c7bdd01663e70b82d2d6d505 in 25ms, sequenceid=264, compaction requested=false 2024-11-24T08:32:55,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:55,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:55,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T08:32:55,462 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/68a49b406de0469e8937541cf9999626 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/68a49b406de0469e8937541cf9999626 2024-11-24T08:32:55,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/4e1b039fec7f4164bd859c8d5def5f69 is 1080, key is row0207/info:/1732437175437/Put/seqid=0 2024-11-24T08:32:55,469 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into 68a49b406de0469e8937541cf9999626(size=144.8 K), total size for store is 163.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:55,469 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:55,469 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437175435; duration=0sec 2024-11-24T08:32:55,469 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:55,469 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:55,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741868_1044 (size=19013) 2024-11-24T08:32:55,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741868_1044 (size=19013) 2024-11-24T08:32:55,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/4e1b039fec7f4164bd859c8d5def5f69 2024-11-24T08:32:55,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/4e1b039fec7f4164bd859c8d5def5f69 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4e1b039fec7f4164bd859c8d5def5f69 2024-11-24T08:32:55,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4e1b039fec7f4164bd859c8d5def5f69, entries=13, sequenceid=280, filesize=18.6 K 2024-11-24T08:32:55,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=5.25 KB/5380 for fe9ce968c7bdd01663e70b82d2d6d505 in 21ms, sequenceid=280, compaction requested=true 2024-11-24T08:32:55,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:55,483 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:55,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:55,483 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:55,484 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 186331 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:55,484 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:55,484 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:55,484 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/68a49b406de0469e8937541cf9999626, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/f65c019597484ca9be269fc3dd669b4f, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4e1b039fec7f4164bd859c8d5def5f69] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=182.0 K 2024-11-24T08:32:55,485 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 68a49b406de0469e8937541cf9999626, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732437155161 2024-11-24T08:32:55,485 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting f65c019597484ca9be269fc3dd669b4f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732437175416 2024-11-24T08:32:55,485 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e1b039fec7f4164bd859c8d5def5f69, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732437175437 2024-11-24T08:32:55,496 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#82 average throughput is 54.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:55,497 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/a7267d0c67694fc7b97bc2449ca2f2d0 is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:55,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741869_1045 (size=176469) 2024-11-24T08:32:55,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741869_1045 (size=176469) 2024-11-24T08:32:55,507 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/a7267d0c67694fc7b97bc2449ca2f2d0 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7267d0c67694fc7b97bc2449ca2f2d0 2024-11-24T08:32:55,513 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into a7267d0c67694fc7b97bc2449ca2f2d0(size=172.3 K), total size for store is 172.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:55,513 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:55,513 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437175483; duration=0sec 2024-11-24T08:32:55,513 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:55,513 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:56,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:56,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:57,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:57,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:57,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:57,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T08:32:57,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/d6b6131a0dee4eaabfc5355db8a3f79b is 1080, key is row0220/info:/1732437175463/Put/seqid=0 2024-11-24T08:32:57,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741870_1046 (size=12523) 2024-11-24T08:32:57,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741870_1046 (size=12523) 2024-11-24T08:32:57,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/d6b6131a0dee4eaabfc5355db8a3f79b 2024-11-24T08:32:57,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/d6b6131a0dee4eaabfc5355db8a3f79b as 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/d6b6131a0dee4eaabfc5355db8a3f79b 2024-11-24T08:32:57,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/d6b6131a0dee4eaabfc5355db8a3f79b, entries=7, sequenceid=292, filesize=12.2 K 2024-11-24T08:32:57,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for fe9ce968c7bdd01663e70b82d2d6d505 in 20ms, sequenceid=292, compaction requested=false 2024-11-24T08:32:57,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:57,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:32:57,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T08:32:57,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/b31eaec8fa1743cbb069bf2a8ca0ab5c is 1080, key is row0227/info:/1732437177478/Put/seqid=0 2024-11-24T08:32:57,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741871_1047 (size=21171) 2024-11-24T08:32:57,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741871_1047 (size=21171) 2024-11-24T08:32:57,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-24T08:32:57,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57672 deadline: 1732437187516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097 2024-11-24T08:32:57,517 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., hostname=30c28c82771d,39061,1732437132097, seqNum=124 , the old value is region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., hostname=30c28c82771d,39061,1732437132097, seqNum=124, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:32:57,517 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., hostname=30c28c82771d,39061,1732437132097, seqNum=124 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fe9ce968c7bdd01663e70b82d2d6d505, server=30c28c82771d,39061,1732437132097 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T08:32:57,517 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., hostname=30c28c82771d,39061,1732437132097, seqNum=124 because the exception is null or not the one we care about 2024-11-24T08:32:57,845 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-24T08:32:57,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/b31eaec8fa1743cbb069bf2a8ca0ab5c 2024-11-24T08:32:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/b31eaec8fa1743cbb069bf2a8ca0ab5c as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/b31eaec8fa1743cbb069bf2a8ca0ab5c 2024-11-24T08:32:57,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/b31eaec8fa1743cbb069bf2a8ca0ab5c, entries=15, sequenceid=310, filesize=20.7 K 2024-11-24T08:32:57,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for fe9ce968c7bdd01663e70b82d2d6d505 in 427ms, sequenceid=310, compaction requested=true 2024-11-24T08:32:57,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:57,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe9ce968c7bdd01663e70b82d2d6d505:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T08:32:57,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:57,925 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T08:32:57,926 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 210163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T08:32:57,926 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1541): 
fe9ce968c7bdd01663e70b82d2d6d505/info is initiating minor compaction (all files) 2024-11-24T08:32:57,926 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fe9ce968c7bdd01663e70b82d2d6d505/info in TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:32:57,926 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7267d0c67694fc7b97bc2449ca2f2d0, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/d6b6131a0dee4eaabfc5355db8a3f79b, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/b31eaec8fa1743cbb069bf2a8ca0ab5c] into tmpdir=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp, totalSize=205.2 K 2024-11-24T08:32:57,926 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7267d0c67694fc7b97bc2449ca2f2d0, keycount=158, bloomtype=ROW, size=172.3 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732437155161 2024-11-24T08:32:57,927 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6b6131a0dee4eaabfc5355db8a3f79b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732437175463 2024-11-24T08:32:57,927 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] compactions.Compactor(225): Compacting b31eaec8fa1743cbb069bf2a8ca0ab5c, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1732437177478 2024-11-24T08:32:57,937 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe9ce968c7bdd01663e70b82d2d6d505#info#compaction#85 average throughput is 61.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T08:32:57,937 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/922d7b8557bd41b9b2fbbe9d5fe2af0d is 1080, key is row0062/info:/1732437155161/Put/seqid=0 2024-11-24T08:32:57,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741872_1048 (size=200313) 2024-11-24T08:32:57,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741872_1048 (size=200313) 2024-11-24T08:32:57,947 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/922d7b8557bd41b9b2fbbe9d5fe2af0d as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/922d7b8557bd41b9b2fbbe9d5fe2af0d 2024-11-24T08:32:57,952 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fe9ce968c7bdd01663e70b82d2d6d505/info of fe9ce968c7bdd01663e70b82d2d6d505 into 922d7b8557bd41b9b2fbbe9d5fe2af0d(size=195.6 K), total size for store is 195.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T08:32:57,952 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:32:57,952 INFO [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505., storeName=fe9ce968c7bdd01663e70b82d2d6d505/info, priority=13, startTime=1732437177925; duration=0sec 2024-11-24T08:32:57,953 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T08:32:57,953 DEBUG [RS:0;30c28c82771d:39061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe9ce968c7bdd01663e70b82d2d6d505:info 2024-11-24T08:32:58,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:58,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:59,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:32:59,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:00,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:00,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:01,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:01,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:02,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:02,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:03,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:03,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:04,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:04,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:05,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:05,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:06,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:06,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:07,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:07,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:07,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39061 {}] regionserver.HRegion(8855): Flush requested on fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:33:07,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fe9ce968c7bdd01663e70b82d2d6d505 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T08:33:07,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/a7b8da6ab20748258e833ee4e7436813 is 1080, key is row0242/info:/1732437177499/Put/seqid=0 2024-11-24T08:33:07,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741873_1049 (size=21171) 2024-11-24T08:33:07,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741873_1049 (size=21171) 2024-11-24T08:33:07,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/a7b8da6ab20748258e833ee4e7436813 2024-11-24T08:33:07,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/.tmp/info/a7b8da6ab20748258e833ee4e7436813 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7b8da6ab20748258e833ee4e7436813 2024-11-24T08:33:07,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7b8da6ab20748258e833ee4e7436813, entries=15, sequenceid=329, filesize=20.7 K 2024-11-24T08:33:07,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for fe9ce968c7bdd01663e70b82d2d6d505 in 19ms, sequenceid=329, compaction requested=false 2024-11-24T08:33:07,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:33:08,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:08,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:09,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:09,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:33:09,589 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-24T08:33:09,589 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C39061%2C1732437132097.1732437189589 2024-11-24T08:33:09,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,598 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097/30c28c82771d%2C39061%2C1732437132097.1732437132479 with entries=310, filesize=307.72 KB; new WAL /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097/30c28c82771d%2C39061%2C1732437132097.1732437189589 2024-11-24T08:33:09,600 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43051:43051),(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-24T08:33:09,600 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097/30c28c82771d%2C39061%2C1732437132097.1732437132479 is not closed yet, will try archiving it next time 2024-11-24T08:33:09,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741833_1009 (size=315114) 2024-11-24T08:33:09,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741833_1009 (size=315114) 2024-11-24T08:33:09,601 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/WALs/30c28c82771d,39061,1732437132097/30c28c82771d%2C39061%2C1732437132097.1732437132479 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/oldWALs/30c28c82771d%2C39061%2C1732437132097.1732437132479 2024-11-24T08:33:09,604 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-24T08:33:09,608 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/info/0bfb04e55a7f4d7287bd1137986beddc is 193, key is TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505./info:regioninfo/1732437157931/Put/seqid=0 2024-11-24T08:33:09,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741875_1051 (size=6223) 2024-11-24T08:33:09,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741875_1051 (size=6223) 2024-11-24T08:33:09,612 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/info/0bfb04e55a7f4d7287bd1137986beddc 2024-11-24T08:33:09,617 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/.tmp/info/0bfb04e55a7f4d7287bd1137986beddc as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/info/0bfb04e55a7f4d7287bd1137986beddc 2024-11-24T08:33:09,621 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/info/0bfb04e55a7f4d7287bd1137986beddc, entries=5, sequenceid=21, filesize=6.1 K 2024-11-24T08:33:09,622 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-24T08:33:09,622 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T08:33:09,623 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c20b5290c32295c6ca6d4936601ba4d2: 2024-11-24T08:33:09,623 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for fe9ce968c7bdd01663e70b82d2d6d505: 2024-11-24T08:33:09,623 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T08:33:09,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:33:09,623 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
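Note: the meta flush at 08:33:09,604-09,622 above first writes the edits to a file under the store's .tmp directory and then commits it with a rename into the info directory ("Committing ... .tmp/info/0bfb04e55a7f4d7287bd1137986beddc as ... info/0bfb04e55a7f4d7287bd1137986beddc"). The snippet below is a minimal sketch of that write-then-rename pattern; it uses the local Hadoop FileSystem and made-up paths so it runs standalone, and it is an illustration of the pattern, not the actual HRegionFileSystem code.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws IOException {
    // Local filesystem so the sketch runs without a cluster; in the log above
    // the same pattern runs against hdfs://localhost:39473. Paths are made up.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path tmp = new Path("/tmp/flush-sketch/.tmp/info/flushfile");
    Path committed = new Path("/tmp/flush-sketch/info/flushfile");

    // 1. Write the flushed data somewhere readers never look.
    fs.mkdirs(tmp.getParent());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here\n");
    }
    // 2. Publish it with a single rename: the "Committing ... as ..." step.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {
      throw new IOException("commit rename failed for " + tmp);
    }
    System.out.println("committed " + committed);
  }
}
```

Because readers only ever open files under info/, a crash between the write and the rename leaves at worst an orphaned .tmp file, never a half-visible store file.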
2024-11-24T08:33:09,623 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:33:09,623 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:09,624 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:09,624 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
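Note: the repeated "Failed invocation for hdfs://localhost:45383/.../WALs/..." warnings between 08:33:00 and 08:33:09 above come from a once-per-second probe of whether the old WAL file has been closed on HDFS. The probe is made reflectively, which is why the underlying "java.io.IOException: Filesystem closed" always surfaces wrapped in an InvocationTargetException. The sketch below shows that probe-and-retry shape, assuming a FileSystem that exposes a public isFileClosed(Path) method (DistributedFileSystem does); it is an illustration, not the actual RecoverLeaseFSUtils implementation.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedRetrySketch {
  /**
   * Polls isFileClosed(Path) via reflection, roughly the way the
   * RecoverLeaseFSUtils warnings above are produced: each failed reflective
   * call wraps the real cause in an InvocationTargetException, which is
   * logged and retried after a one-second pause.
   */
  static boolean waitUntilClosed(FileSystem fs, Path wal, int attempts)
      throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem does not expose the probe at all
    }
    for (int i = 0; i < attempts; i++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // Corresponds to the "Failed invocation for ..." WARN lines. In this
        // log the wrapped cause is "IOException: Filesystem closed", so once
        // the DFSClient is shut down no amount of retrying can succeed.
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}
```

That is why the same warning reappears with a new timestamp each second: the client behind the old WAL's FileSystem has already been closed, so every probe fails identically until the retry budget runs out.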
2024-11-24T08:33:09,624 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:33:09,624 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1374957656, stopped=false 2024-11-24T08:33:09,624 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,40953,1732437132049 2024-11-24T08:33:09,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:33:09,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:33:09,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:09,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:09,625 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:33:09,626 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:33:09,626 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:33:09,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:09,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:33:09,626 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,39061,1732437132097' ***** 2024-11-24T08:33:09,626 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:33:09,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:33:09,627 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
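Note: the ZKWatcher entries at 08:33:09,625-626 above show how shutdown is signalled: deleting the /hbase/running znode delivers a NodeDeleted event to master and region server, and each watcher immediately re-arms itself even though the node is now gone ("Set watcher on znode that does not yet exist"). Below is a minimal sketch of that exists()-based watch pattern using the plain ZooKeeper client; the quorum address and znode path are hypothetical and this is not the actual ZKWatcher code.

```java
import java.io.IOException;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatchSketch {
  public static void main(String[] args)
      throws IOException, InterruptedException, KeeperException {
    // Hypothetical quorum and znode, for illustration only.
    final String runningZNode = "/demo/running";
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    Watcher shutdownWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && runningZNode.equals(event.getPath())) {
        System.out.println("running znode deleted: begin shutdown");
      }
    };

    // exists() returns null when the node is absent but still registers the
    // watch, so the watcher fires on a later create or delete -- the
    // "Set watcher on znode that does not yet exist" situation in the log.
    if (zk.exists(runningZNode, shutdownWatcher) == null) {
      System.out.println("znode absent; watch armed for future events");
    }
    // A real server keeps the session open; this sketch simply exits.
  }
}
```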
2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(3091): Received CLOSE for c20b5290c32295c6ca6d4936601ba4d2 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(3091): Received CLOSE for fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,39061,1732437132097 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:39061. 2024-11-24T08:33:09,627 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c20b5290c32295c6ca6d4936601ba4d2, disabling compactions & flushes 2024-11-24T08:33:09,627 DEBUG [RS:0;30c28c82771d:39061 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:33:09,627 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:33:09,627 DEBUG [RS:0;30c28c82771d:39061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:09,627 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:33:09,627 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. after waiting 0 ms 2024-11-24T08:33:09,627 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
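Note: the region close sequence above ("disabling compactions & flushes", "Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled for region") follows a common quiesce-then-close shape: new updates are rejected, in-flight updates drain under a shared lock, and close proceeds once it holds the lock exclusively. The sketch below illustrates that shape with a plain ReentrantReadWriteLock; it is a generic illustration under that assumption, not HRegion's actual locking code.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CloseLockSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private final AtomicBoolean closing = new AtomicBoolean(false);

  // Ordinary updates share the lock, so many can run concurrently.
  void update(Runnable mutation) {
    if (closing.get()) {
      throw new IllegalStateException("closing: updates disabled");
    }
    closeLock.readLock().lock();
    try {
      mutation.run();
    } finally {
      closeLock.readLock().unlock();
    }
  }

  // Close flips the "updates disabled" flag, then waits with a time limit
  // (compare "Time limited wait for close lock" above) for in-flight updates
  // to drain before taking the lock exclusively.
  boolean close(long timeoutMillis) throws InterruptedException {
    closing.set(true);
    if (!closeLock.writeLock().tryLock(timeoutMillis, TimeUnit.MILLISECONDS)) {
      return false; // writers did not quiesce in time
    }
    try {
      // flush memstore / release resources here
      return true;
    } finally {
      closeLock.writeLock().unlock();
    }
  }
}
```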
2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T08:33:09,627 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:33:09,628 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-24T08:33:09,628 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c20b5290c32295c6ca6d4936601ba4d2=TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2., fe9ce968c7bdd01663e70b82d2d6d505=TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.} 2024-11-24T08:33:09,628 DEBUG [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c20b5290c32295c6ca6d4936601ba4d2, fe9ce968c7bdd01663e70b82d2d6d505 2024-11-24T08:33:09,628 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:33:09,628 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:33:09,628 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:33:09,628 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:33:09,628 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:33:09,628 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c->hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab-bottom] to archive 2024-11-24T08:33:09,629 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T08:33:09,630 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:33:09,630 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=30c28c82771d:40953 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-24T08:33:09,631 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-24T08:33:09,636 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T08:33:09,636 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:33:09,636 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:33:09,636 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437189628Running coprocessor pre-close hooks at 1732437189628Disabling compacts and flushes for region at 1732437189628Disabling writes for close at 1732437189628Writing region close event to WAL at 1732437189629 (+1 ms)Running coprocessor post-close hooks at 1732437189636 (+7 ms)Closed at 1732437189636 2024-11-24T08:33:09,636 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:33:09,638 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/c20b5290c32295c6ca6d4936601ba4d2/recovered.edits/128.seqid, newMaxSeqId=128, maxSeqId=123 2024-11-24T08:33:09,638 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 
2024-11-24T08:33:09,638 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c20b5290c32295c6ca6d4936601ba4d2: Waiting for close lock at 1732437189627Running coprocessor pre-close hooks at 1732437189627Disabling compacts and flushes for region at 1732437189627Disabling writes for close at 1732437189627Writing region close event to WAL at 1732437189635 (+8 ms)Running coprocessor post-close hooks at 1732437189638 (+3 ms)Closed at 1732437189638 2024-11-24T08:33:09,638 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732437157240.c20b5290c32295c6ca6d4936601ba4d2. 2024-11-24T08:33:09,639 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fe9ce968c7bdd01663e70b82d2d6d505, disabling compactions & flushes 2024-11-24T08:33:09,639 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:33:09,639 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:33:09,639 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. after waiting 0 ms 2024-11-24T08:33:09,639 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 
2024-11-24T08:33:09,639 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c->hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/189e68d79a69ab5600f63dfb617fc52c/info/c6b28f93e4784ef69bc5662898674aab-top, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9ff1e5546a1445de8575e01c63062ded, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c45133530fd844d09b7572b84f42bb13, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/efe3e997f1434a2f81f53458b510cf6c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9fcab5316bf743c9a2c671992d8358b7, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/6afbac8ce9f947adb0a4920147c3f599, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1b25d67f43f94afab1dad1067f3c5124, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4c5138218de2412c8c0d9f3e77d82781, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/82e0fb13bc504151a0af2bcc9d714b8c, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/e0f3c463070c4d2dbd316e3109c3b11a, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/66b9ec325d1140fd906e5791d622a3fa, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/68a49b406de0469e8937541cf9999626, 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/41e25b4901e34d779876be792f83edc4, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/f65c019597484ca9be269fc3dd669b4f, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7267d0c67694fc7b97bc2449ca2f2d0, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4e1b039fec7f4164bd859c8d5def5f69, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/d6b6131a0dee4eaabfc5355db8a3f79b, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/b31eaec8fa1743cbb069bf2a8ca0ab5c] to archive 2024-11-24T08:33:09,640 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T08:33:09,641 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c6b28f93e4784ef69bc5662898674aab.189e68d79a69ab5600f63dfb617fc52c 2024-11-24T08:33:09,642 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-8c0ff2f455654727b0fb76520d76642f 2024-11-24T08:33:09,643 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9ff1e5546a1445de8575e01c63062ded to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9ff1e5546a1445de8575e01c63062ded 2024-11-24T08:33:09,644 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/TestLogRolling-testLogRolling=189e68d79a69ab5600f63dfb617fc52c-cf8c5ea67e134858bcdc6d056cadd1e7 2024-11-24T08:33:09,645 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c45133530fd844d09b7572b84f42bb13 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/c45133530fd844d09b7572b84f42bb13 2024-11-24T08:33:09,646 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/efe3e997f1434a2f81f53458b510cf6c to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/efe3e997f1434a2f81f53458b510cf6c 2024-11-24T08:33:09,647 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9fcab5316bf743c9a2c671992d8358b7 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/9fcab5316bf743c9a2c671992d8358b7 2024-11-24T08:33:09,648 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1a0b7ec6f3fd4e00a6a108a24fb71a4c 2024-11-24T08:33:09,649 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/6afbac8ce9f947adb0a4920147c3f599 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/6afbac8ce9f947adb0a4920147c3f599 
2024-11-24T08:33:09,650 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1b25d67f43f94afab1dad1067f3c5124 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/1b25d67f43f94afab1dad1067f3c5124 2024-11-24T08:33:09,651 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4c5138218de2412c8c0d9f3e77d82781 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4c5138218de2412c8c0d9f3e77d82781 2024-11-24T08:33:09,652 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/82e0fb13bc504151a0af2bcc9d714b8c to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/82e0fb13bc504151a0af2bcc9d714b8c 2024-11-24T08:33:09,653 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/e0f3c463070c4d2dbd316e3109c3b11a to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/e0f3c463070c4d2dbd316e3109c3b11a 2024-11-24T08:33:09,654 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/66b9ec325d1140fd906e5791d622a3fa to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/66b9ec325d1140fd906e5791d622a3fa 2024-11-24T08:33:09,655 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/68a49b406de0469e8937541cf9999626 to 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/68a49b406de0469e8937541cf9999626 2024-11-24T08:33:09,656 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/41e25b4901e34d779876be792f83edc4 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/41e25b4901e34d779876be792f83edc4 2024-11-24T08:33:09,657 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/f65c019597484ca9be269fc3dd669b4f to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/f65c019597484ca9be269fc3dd669b4f 2024-11-24T08:33:09,658 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7267d0c67694fc7b97bc2449ca2f2d0 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/a7267d0c67694fc7b97bc2449ca2f2d0 2024-11-24T08:33:09,659 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4e1b039fec7f4164bd859c8d5def5f69 to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/4e1b039fec7f4164bd859c8d5def5f69 2024-11-24T08:33:09,660 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/d6b6131a0dee4eaabfc5355db8a3f79b to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/d6b6131a0dee4eaabfc5355db8a3f79b 2024-11-24T08:33:09,661 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/b31eaec8fa1743cbb069bf2a8ca0ab5c to hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/archive/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/info/b31eaec8fa1743cbb069bf2a8ca0ab5c 2024-11-24T08:33:09,661 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [9ff1e5546a1445de8575e01c63062ded=42984, c45133530fd844d09b7572b84f42bb13=12515, efe3e997f1434a2f81f53458b510cf6c=66869, 9fcab5316bf743c9a2c671992d8358b7=21156, 1a0b7ec6f3fd4e00a6a108a24fb71a4c=16828, 6afbac8ce9f947adb0a4920147c3f599=95076, 1b25d67f43f94afab1dad1067f3c5124=21156, 4c5138218de2412c8c0d9f3e77d82781=20078, 82e0fb13bc504151a0af2bcc9d714b8c=117820, e0f3c463070c4d2dbd316e3109c3b11a=12516, 66b9ec325d1140fd906e5791d622a3fa=20078, 68a49b406de0469e8937541cf9999626=148311, 41e25b4901e34d779876be792f83edc4=20078, f65c019597484ca9be269fc3dd669b4f=19007, a7267d0c67694fc7b97bc2449ca2f2d0=176469, 4e1b039fec7f4164bd859c8d5def5f69=19013, d6b6131a0dee4eaabfc5355db8a3f79b=12523, b31eaec8fa1743cbb069bf2a8ca0ab5c=21171] 2024-11-24T08:33:09,664 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/data/default/TestLogRolling-testLogRolling/fe9ce968c7bdd01663e70b82d2d6d505/recovered.edits/333.seqid, newMaxSeqId=333, maxSeqId=123 2024-11-24T08:33:09,665 INFO [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:33:09,665 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fe9ce968c7bdd01663e70b82d2d6d505: Waiting for close lock at 1732437189639Running coprocessor pre-close hooks at 1732437189639Disabling compacts and flushes for region at 1732437189639Disabling writes for close at 1732437189639Writing region close event to WAL at 1732437189661 (+22 ms)Running coprocessor post-close hooks at 1732437189665 (+4 ms)Closed at 1732437189665 2024-11-24T08:33:09,665 DEBUG [RS_CLOSE_REGION-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732437157240.fe9ce968c7bdd01663e70b82d2d6d505. 2024-11-24T08:33:09,828 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,39061,1732437132097; all regions closed. 
2024-11-24T08:33:09,828 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,828 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,829 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,829 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,829 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741834_1010 (size=8107) 2024-11-24T08:33:09,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741834_1010 (size=8107) 2024-11-24T08:33:09,833 DEBUG [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/oldWALs 2024-11-24T08:33:09,833 INFO [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C39061%2C1732437132097.meta:.meta(num 1732437132837) 2024-11-24T08:33:09,833 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,833 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,833 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,833 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,834 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741874_1050 (size=779) 2024-11-24T08:33:09,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741874_1050 (size=779) 2024-11-24T08:33:09,837 DEBUG [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/oldWALs 2024-11-24T08:33:09,837 INFO [RS:0;30c28c82771d:39061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C39061%2C1732437132097:(num 1732437189589) 2024-11-24T08:33:09,837 DEBUG [RS:0;30c28c82771d:39061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:09,837 INFO [RS:0;30c28c82771d:39061 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:33:09,837 INFO [RS:0;30c28c82771d:39061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:33:09,837 INFO [RS:0;30c28c82771d:39061 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:33:09,837 INFO [RS:0;30c28c82771d:39061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:33:09,838 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:33:09,838 INFO [RS:0;30c28c82771d:39061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39061 2024-11-24T08:33:09,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,39061,1732437132097 2024-11-24T08:33:09,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:33:09,840 INFO [RS:0;30c28c82771d:39061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:33:09,841 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,39061,1732437132097] 2024-11-24T08:33:09,843 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,39061,1732437132097 already deleted, retry=false 2024-11-24T08:33:09,843 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,39061,1732437132097 expired; onlineServers=0 2024-11-24T08:33:09,843 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,40953,1732437132049' ***** 2024-11-24T08:33:09,843 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:33:09,843 INFO [M:0;30c28c82771d:40953 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:33:09,843 INFO [M:0;30c28c82771d:40953 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:33:09,843 DEBUG [M:0;30c28c82771d:40953 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:33:09,843 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:33:09,843 DEBUG [M:0;30c28c82771d:40953 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:33:09,844 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437132240 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437132240,5,FailOnTimeoutGroup] 2024-11-24T08:33:09,844 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437132237 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437132237,5,FailOnTimeoutGroup] 2024-11-24T08:33:09,844 INFO [M:0;30c28c82771d:40953 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:33:09,844 INFO [M:0;30c28c82771d:40953 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:33:09,844 DEBUG [M:0;30c28c82771d:40953 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:33:09,844 INFO [M:0;30c28c82771d:40953 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:33:09,844 INFO [M:0;30c28c82771d:40953 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:33:09,844 INFO [M:0;30c28c82771d:40953 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:33:09,844 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:33:09,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:33:09,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:09,845 DEBUG [M:0;30c28c82771d:40953 {}] zookeeper.ZKUtil(347): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:33:09,845 WARN [M:0;30c28c82771d:40953 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:33:09,845 INFO [M:0;30c28c82771d:40953 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/.lastflushedseqids 2024-11-24T08:33:09,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741876_1052 (size=228) 2024-11-24T08:33:09,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741876_1052 (size=228) 2024-11-24T08:33:09,852 INFO [M:0;30c28c82771d:40953 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:33:09,852 INFO [M:0;30c28c82771d:40953 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:33:09,852 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:33:09,852 INFO [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:09,852 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:09,853 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:33:09,853 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:09,853 INFO [M:0;30c28c82771d:40953 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-24T08:33:09,868 DEBUG [M:0;30c28c82771d:40953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2a8ee4056c4e4cb798f5dd55871496d8 is 82, key is hbase:meta,,1/info:regioninfo/1732437132860/Put/seqid=0 2024-11-24T08:33:09,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741877_1053 (size=5672) 2024-11-24T08:33:09,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741877_1053 (size=5672) 2024-11-24T08:33:09,873 INFO [M:0;30c28c82771d:40953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2a8ee4056c4e4cb798f5dd55871496d8 2024-11-24T08:33:09,891 DEBUG [M:0;30c28c82771d:40953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/92418bdb641b4330883f4f00b59acbdb is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732437133288/Put/seqid=0 2024-11-24T08:33:09,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741878_1054 (size=7090) 2024-11-24T08:33:09,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741878_1054 (size=7090) 2024-11-24T08:33:09,896 INFO [M:0;30c28c82771d:40953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/92418bdb641b4330883f4f00b59acbdb 2024-11-24T08:33:09,900 INFO [M:0;30c28c82771d:40953 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 92418bdb641b4330883f4f00b59acbdb 2024-11-24T08:33:09,914 DEBUG [M:0;30c28c82771d:40953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d4eed77320b64e56bb6ab3fcdddd0c27 is 69, key is 30c28c82771d,39061,1732437132097/rs:state/1732437132330/Put/seqid=0 2024-11-24T08:33:09,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741879_1055 (size=5156) 2024-11-24T08:33:09,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741879_1055 (size=5156) 2024-11-24T08:33:09,919 INFO [M:0;30c28c82771d:40953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d4eed77320b64e56bb6ab3fcdddd0c27 2024-11-24T08:33:09,937 DEBUG [M:0;30c28c82771d:40953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5b751ae273ed4edab13a2ee3ea918e49 is 52, key is load_balancer_on/state:d/1732437132919/Put/seqid=0 2024-11-24T08:33:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741880_1056 (size=5056) 2024-11-24T08:33:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741880_1056 (size=5056) 2024-11-24T08:33:09,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:09,941 INFO [RS:0;30c28c82771d:39061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:33:09,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39061-0x101491aad930001, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:09,941 INFO [RS:0;30c28c82771d:39061 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,39061,1732437132097; zookeeper connection closed. 
2024-11-24T08:33:09,941 INFO [M:0;30c28c82771d:40953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5b751ae273ed4edab13a2ee3ea918e49 2024-11-24T08:33:09,942 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@12f26131 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@12f26131 2024-11-24T08:33:09,942 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:33:09,946 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2a8ee4056c4e4cb798f5dd55871496d8 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2a8ee4056c4e4cb798f5dd55871496d8 2024-11-24T08:33:09,949 INFO [M:0;30c28c82771d:40953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2a8ee4056c4e4cb798f5dd55871496d8, entries=8, sequenceid=125, filesize=5.5 K 2024-11-24T08:33:09,950 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/92418bdb641b4330883f4f00b59acbdb as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/92418bdb641b4330883f4f00b59acbdb 2024-11-24T08:33:09,954 INFO [M:0;30c28c82771d:40953 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 92418bdb641b4330883f4f00b59acbdb 2024-11-24T08:33:09,954 INFO [M:0;30c28c82771d:40953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/92418bdb641b4330883f4f00b59acbdb, entries=13, sequenceid=125, filesize=6.9 K 2024-11-24T08:33:09,955 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d4eed77320b64e56bb6ab3fcdddd0c27 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d4eed77320b64e56bb6ab3fcdddd0c27 2024-11-24T08:33:09,959 INFO [M:0;30c28c82771d:40953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d4eed77320b64e56bb6ab3fcdddd0c27, entries=1, sequenceid=125, filesize=5.0 K 2024-11-24T08:33:09,959 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5b751ae273ed4edab13a2ee3ea918e49 as hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5b751ae273ed4edab13a2ee3ea918e49 2024-11-24T08:33:09,963 INFO [M:0;30c28c82771d:40953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39473/user/jenkins/test-data/af18ccca-21bd-d6d9-8148-dbd03059bdd2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5b751ae273ed4edab13a2ee3ea918e49, entries=1, sequenceid=125, filesize=4.9 K 2024-11-24T08:33:09,964 INFO [M:0;30c28c82771d:40953 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false 2024-11-24T08:33:09,965 INFO [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:09,965 DEBUG [M:0;30c28c82771d:40953 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437189852Disabling compacts and flushes for region at 1732437189852Disabling writes for close at 1732437189853 (+1 ms)Obtaining lock to block concurrent updates at 1732437189853Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732437189853Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732437189853Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732437189854 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732437189854Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732437189868 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732437189868Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732437189877 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732437189891 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732437189891Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732437189900 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732437189913 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732437189913Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732437189923 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732437189936 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732437189936Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@247218c3: reopening flushed file at 1732437189945 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@252bc87: reopening flushed file at 1732437189950 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50b38aa5: reopening flushed file at 1732437189954 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c256a37: reopening flushed file at 1732437189959 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false at 1732437189964 (+5 ms)Writing region close event to WAL at 1732437189965 (+1 ms)Closed at 1732437189965 2024-11-24T08:33:09,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:09,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741830_1006 (size=61320) 2024-11-24T08:33:09,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44775 is added to blk_1073741830_1006 (size=61320) 2024-11-24T08:33:09,968 INFO [M:0;30c28c82771d:40953 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:33:09,968 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:33:09,968 INFO [M:0;30c28c82771d:40953 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40953 2024-11-24T08:33:09,969 INFO [M:0;30c28c82771d:40953 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:33:10,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:10,070 INFO [M:0;30c28c82771d:40953 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:33:10,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40953-0x101491aad930000, quorum=127.0.0.1:51728, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:10,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77f3cd08{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:33:10,073 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2401b6df{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:33:10,073 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:33:10,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b95b0ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:33:10,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60b9b83d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir/,STOPPED} 2024-11-24T08:33:10,075 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:33:10,075 WARN [BP-576311980-172.17.0.2-1732437131257 heartbeating to localhost/127.0.0.1:39473 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:33:10,075 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:33:10,075 WARN [BP-576311980-172.17.0.2-1732437131257 heartbeating to localhost/127.0.0.1:39473 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-576311980-172.17.0.2-1732437131257 (Datanode Uuid 629e8ff0-48a9-4f53-a549-0e7290ac3a96) service to localhost/127.0.0.1:39473 2024-11-24T08:33:10,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data3/current/BP-576311980-172.17.0.2-1732437131257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:10,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data4/current/BP-576311980-172.17.0.2-1732437131257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:10,076 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:33:10,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3612be31{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:33:10,078 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51585bde{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:33:10,078 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:33:10,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76e6f692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:33:10,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cf57c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir/,STOPPED} 2024-11-24T08:33:10,080 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:33:10,080 WARN [BP-576311980-172.17.0.2-1732437131257 heartbeating to localhost/127.0.0.1:39473 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:33:10,080 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:33:10,080 WARN [BP-576311980-172.17.0.2-1732437131257 heartbeating to localhost/127.0.0.1:39473 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-576311980-172.17.0.2-1732437131257 (Datanode Uuid 692e365f-e0a1-4a55-9cc6-7579731da366) service to localhost/127.0.0.1:39473 2024-11-24T08:33:10,080 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data1/current/BP-576311980-172.17.0.2-1732437131257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:10,081 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/cluster_f9b4c950-e343-bd3f-4844-0109e911b38b/data/data2/current/BP-576311980-172.17.0.2-1732437131257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:10,081 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:33:10,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63e2e387{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:33:10,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66ce6cc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:33:10,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:33:10,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2dd2b381{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:33:10,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69d49ce9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir/,STOPPED} 2024-11-24T08:33:10,094 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:33:10,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:33:10,129 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 207) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39473 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39473 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39473 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39473 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39473 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39473 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39473 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39473 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=21 (was 48), ProcessCount=11 (was 11), AvailableMemoryMB=6269 (was 6431) 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=21, ProcessCount=11, AvailableMemoryMB=6269 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.log.dir so I do NOT create it in target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6350f64e-5feb-a4aa-31c6-12cbddb53adc/hadoop.tmp.dir so I do NOT create it in target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a, deleteOnExit=true 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/test.cache.data in system properties and HBase conf 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir in system properties and HBase conf 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T08:33:10,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T08:33:10,138 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/nfs.dump.dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/java.io.tmpdir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T08:33:10,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T08:33:10,151 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:33:10,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:33:10,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:33:10,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:33:10,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:33:10,221 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:33:10,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:33:10,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2394ff19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:33:10,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cdb81da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:33:10,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6dcbbc83{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/java.io.tmpdir/jetty-localhost-45875-hadoop-hdfs-3_4_1-tests_jar-_-any-2116128083493809027/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:33:10,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1802ed83{HTTP/1.1, (http/1.1)}{localhost:45875} 2024-11-24T08:33:10,335 INFO [Time-limited test {}] server.Server(415): Started @296458ms 2024-11-24T08:33:10,348 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T08:33:10,349 INFO [regionserver/30c28c82771d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:33:10,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:10,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:10,403 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:33:10,406 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:33:10,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:33:10,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:33:10,406 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T08:33:10,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56836339{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:33:10,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49428dc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:33:10,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@223a801d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/java.io.tmpdir/jetty-localhost-40801-hadoop-hdfs-3_4_1-tests_jar-_-any-14530437002905582345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:33:10,520 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8035060{HTTP/1.1, (http/1.1)}{localhost:40801} 2024-11-24T08:33:10,520 INFO [Time-limited test {}] server.Server(415): Started @296643ms 2024-11-24T08:33:10,522 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T08:33:10,550 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T08:33:10,552 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T08:33:10,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T08:33:10,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T08:33:10,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T08:33:10,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@644054b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir/,AVAILABLE} 2024-11-24T08:33:10,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186f146f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T08:33:10,627 WARN [Thread-2453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data1/current/BP-488763426-172.17.0.2-1732437190157/current, will proceed with Du for space computation calculation, 2024-11-24T08:33:10,627 WARN [Thread-2454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data2/current/BP-488763426-172.17.0.2-1732437190157/current, will proceed with Du for space computation calculation, 2024-11-24T08:33:10,649 WARN [Thread-2432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T08:33:10,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15a6e9072f61de1e with lease ID 0xdae65dd78c17d13: Processing first storage report for DS-c0f962ae-a8f6-4fd9-90f3-57a0d54321db from datanode DatanodeRegistration(127.0.0.1:34369, datanodeUuid=15a6526b-f705-4e05-aee2-db14731c77fd, infoPort=34047, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157) 2024-11-24T08:33:10,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15a6e9072f61de1e with lease ID 0xdae65dd78c17d13: from storage DS-c0f962ae-a8f6-4fd9-90f3-57a0d54321db node DatanodeRegistration(127.0.0.1:34369, datanodeUuid=15a6526b-f705-4e05-aee2-db14731c77fd, infoPort=34047, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:33:10,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15a6e9072f61de1e with lease ID 0xdae65dd78c17d13: Processing first storage report for DS-0959406c-5251-448b-8af0-f22ffbe01828 from datanode DatanodeRegistration(127.0.0.1:34369, datanodeUuid=15a6526b-f705-4e05-aee2-db14731c77fd, infoPort=34047, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157) 2024-11-24T08:33:10,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15a6e9072f61de1e with lease ID 0xdae65dd78c17d13: from storage DS-0959406c-5251-448b-8af0-f22ffbe01828 node DatanodeRegistration(127.0.0.1:34369, datanodeUuid=15a6526b-f705-4e05-aee2-db14731c77fd, infoPort=34047, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:33:10,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40d0e54c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/java.io.tmpdir/jetty-localhost-39501-hadoop-hdfs-3_4_1-tests_jar-_-any-6449457501548324479/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:33:10,677 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@372003a{HTTP/1.1, (http/1.1)}{localhost:39501} 2024-11-24T08:33:10,677 INFO [Time-limited test {}] server.Server(415): Started @296799ms 2024-11-24T08:33:10,678 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T08:33:10,773 WARN [Thread-2479 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data3/current/BP-488763426-172.17.0.2-1732437190157/current, will proceed with Du for space computation calculation, 2024-11-24T08:33:10,773 WARN [Thread-2480 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data4/current/BP-488763426-172.17.0.2-1732437190157/current, will proceed with Du for space computation calculation, 2024-11-24T08:33:10,790 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T08:33:10,792 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a9791ebbab7c0b1 with lease ID 0xdae65dd78c17d14: Processing first storage report for DS-275e41df-38ea-4dfd-a137-6a33e3ae6199 from datanode DatanodeRegistration(127.0.0.1:45849, datanodeUuid=53e5c375-1367-4008-93b0-1e6c5a41e744, infoPort=36337, infoSecurePort=0, ipcPort=35199, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157) 2024-11-24T08:33:10,792 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a9791ebbab7c0b1 with lease ID 0xdae65dd78c17d14: from storage DS-275e41df-38ea-4dfd-a137-6a33e3ae6199 node DatanodeRegistration(127.0.0.1:45849, datanodeUuid=53e5c375-1367-4008-93b0-1e6c5a41e744, infoPort=36337, infoSecurePort=0, ipcPort=35199, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:33:10,792 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a9791ebbab7c0b1 with lease ID 0xdae65dd78c17d14: Processing first storage report for DS-2b6f9c15-cffd-4989-9b6e-9af57e10a691 from datanode DatanodeRegistration(127.0.0.1:45849, datanodeUuid=53e5c375-1367-4008-93b0-1e6c5a41e744, infoPort=36337, infoSecurePort=0, ipcPort=35199, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157) 2024-11-24T08:33:10,792 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a9791ebbab7c0b1 with lease ID 0xdae65dd78c17d14: from storage DS-2b6f9c15-cffd-4989-9b6e-9af57e10a691 node DatanodeRegistration(127.0.0.1:45849, datanodeUuid=53e5c375-1367-4008-93b0-1e6c5a41e744, infoPort=36337, infoSecurePort=0, ipcPort=35199, storageInfo=lv=-57;cid=testClusterID;nsid=1193181819;c=1732437190157), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T08:33:10,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57 2024-11-24T08:33:10,801 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/zookeeper_0, clientPort=58437, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T08:33:10,802 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58437 2024-11-24T08:33:10,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,804 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:33:10,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741825_1001 (size=7) 2024-11-24T08:33:10,813 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad with version=8 2024-11-24T08:33:10,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36899/user/jenkins/test-data/d8423371-72a0-c27b-469b-f67fa0cf0747/hbase-staging 2024-11-24T08:33:10,815 INFO [Time-limited test {}] client.ConnectionUtils(128): master/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T08:33:10,815 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:33:10,816 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34897 2024-11-24T08:33:10,817 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34897 connecting to ZooKeeper ensemble=127.0.0.1:58437 2024-11-24T08:33:10,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348970x0, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:33:10,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34897-0x101491b93200000 connected 2024-11-24T08:33:10,841 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,842 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:33:10,844 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad, hbase.cluster.distributed=false 2024-11-24T08:33:10,846 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:33:10,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-24T08:33:10,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34897 2024-11-24T08:33:10,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34897 2024-11-24T08:33:10,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-24T08:33:10,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-24T08:33:10,864 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/30c28c82771d:0 server-side Connection retries=45 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T08:33:10,864 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T08:33:10,865 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43215 2024-11-24T08:33:10,866 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43215 connecting to ZooKeeper ensemble=127.0.0.1:58437 2024-11-24T08:33:10,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432150x0, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T08:33:10,872 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432150x0, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:33:10,872 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43215-0x101491b93200001 connected 2024-11-24T08:33:10,872 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T08:33:10,873 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T08:33:10,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T08:33:10,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T08:33:10,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43215 2024-11-24T08:33:10,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43215 2024-11-24T08:33:10,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43215 2024-11-24T08:33:10,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43215 2024-11-24T08:33:10,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43215 2024-11-24T08:33:10,887 
DEBUG [M:0;30c28c82771d:34897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;30c28c82771d:34897 2024-11-24T08:33:10,887 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/30c28c82771d,34897,1732437190815 2024-11-24T08:33:10,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:33:10,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:33:10,889 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/30c28c82771d,34897,1732437190815 2024-11-24T08:33:10,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T08:33:10,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,891 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T08:33:10,891 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/30c28c82771d,34897,1732437190815 from backup master directory 2024-11-24T08:33:10,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/30c28c82771d,34897,1732437190815 2024-11-24T08:33:10,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:33:10,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T08:33:10,898 WARN [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T08:33:10,898 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=30c28c82771d,34897,1732437190815 2024-11-24T08:33:10,901 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/hbase.id] with ID: 996bffb8-8029-4b34-84a0-151103e3f92e 2024-11-24T08:33:10,901 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/.tmp/hbase.id 2024-11-24T08:33:10,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:33:10,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741826_1002 (size=42) 2024-11-24T08:33:10,908 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/.tmp/hbase.id]:[hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/hbase.id] 2024-11-24T08:33:10,918 INFO [master/30c28c82771d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:10,918 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T08:33:10,919 INFO [master/30c28c82771d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-24T08:33:10,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:33:10,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741827_1003 (size=196) 2024-11-24T08:33:10,929 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T08:33:10,930 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T08:33:10,930 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:33:10,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:33:10,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741828_1004 (size=1189) 2024-11-24T08:33:10,938 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store 2024-11-24T08:33:10,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:33:10,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741829_1005 (size=34) 2024-11-24T08:33:10,944 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:33:10,944 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:33:10,944 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:10,944 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:10,944 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:33:10,944 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:10,944 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T08:33:10,944 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437190944Disabling compacts and flushes for region at 1732437190944Disabling writes for close at 1732437190944Writing region close event to WAL at 1732437190944Closed at 1732437190944 2024-11-24T08:33:10,945 WARN [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/.initializing 2024-11-24T08:33:10,945 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/WALs/30c28c82771d,34897,1732437190815 2024-11-24T08:33:10,947 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C34897%2C1732437190815, suffix=, logDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/WALs/30c28c82771d,34897,1732437190815, archiveDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/oldWALs, maxLogs=10 2024-11-24T08:33:10,948 INFO [master/30c28c82771d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C34897%2C1732437190815.1732437190948 2024-11-24T08:33:10,952 INFO [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/WALs/30c28c82771d,34897,1732437190815/30c28c82771d%2C34897%2C1732437190815.1732437190948 2024-11-24T08:33:10,956 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34047:34047),(127.0.0.1/127.0.0.1:36337:36337)] 2024-11-24T08:33:10,956 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:33:10,957 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:33:10,957 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,957 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T08:33:10,960 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:10,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:10,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T08:33:10,962 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:10,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:33:10,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T08:33:10,963 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:10,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:33:10,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T08:33:10,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:10,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T08:33:10,965 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,966 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,966 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,967 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,967 DEBUG [master/30c28c82771d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,967 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T08:33:10,968 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T08:33:10,973 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:33:10,973 INFO [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863867, jitterRate=0.09846378862857819}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T08:33:10,974 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732437190957Initializing all the Stores at 1732437190958 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437190958Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437190959 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437190959Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437190959Cleaning up temporary data from old regions at 1732437190967 (+8 ms)Region opened successfully at 1732437190974 (+7 ms) 2024-11-24T08:33:10,974 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T08:33:10,977 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@735179ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:33:10,977 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T08:33:10,978 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T08:33:10,978 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T08:33:10,978 INFO [master/30c28c82771d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T08:33:10,978 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T08:33:10,978 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T08:33:10,978 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T08:33:10,980 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T08:33:10,981 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T08:33:10,982 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T08:33:10,982 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T08:33:10,983 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T08:33:10,984 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T08:33:10,984 INFO [master/30c28c82771d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T08:33:10,985 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T08:33:10,987 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T08:33:10,987 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T08:33:10,988 DEBUG 
[master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T08:33:10,990 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T08:33:10,991 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T08:33:10,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:33:10,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T08:33:10,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,993 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=30c28c82771d,34897,1732437190815, sessionid=0x101491b93200000, setting cluster-up flag (Was=false) 2024-11-24T08:33:10,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:10,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,001 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T08:33:11,002 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,34897,1732437190815 2024-11-24T08:33:11,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,009 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T08:33:11,010 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=30c28c82771d,34897,1732437190815 2024-11-24T08:33:11,011 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T08:33:11,012 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T08:33:11,013 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T08:33:11,013 INFO [master/30c28c82771d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T08:33:11,013 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 30c28c82771d,34897,1732437190815 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/30c28c82771d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/30c28c82771d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:33:11,014 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/30c28c82771d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732437221015 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T08:33:11,015 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,015 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T08:33:11,016 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T08:33:11,016 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437191016,5,FailOnTimeoutGroup] 2024-11-24T08:33:11,016 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437191016,5,FailOnTimeoutGroup] 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,016 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,017 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,017 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T08:33:11,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:33:11,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741831_1007 (size=1321) 2024-11-24T08:33:11,024 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T08:33:11,024 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad 2024-11-24T08:33:11,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:33:11,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741832_1008 (size=32) 2024-11-24T08:33:11,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:33:11,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:33:11,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:33:11,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:33:11,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:33:11,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:33:11,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:33:11,039 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:33:11,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:33:11,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:33:11,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740 2024-11-24T08:33:11,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740 2024-11-24T08:33:11,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:33:11,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:33:11,043 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T08:33:11,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:33:11,046 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T08:33:11,046 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822941, jitterRate=0.04642483592033386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:33:11,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732437191034Initializing all the Stores at 1732437191034Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437191034Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437191035 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437191035Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437191035Cleaning up temporary data from old regions at 1732437191042 (+7 ms)Region opened successfully at 1732437191047 (+5 ms) 2024-11-24T08:33:11,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:33:11,047 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:33:11,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:33:11,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:33:11,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:33:11,048 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:33:11,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437191047Disabling compacts and flushes for region at 1732437191047Disabling writes for close at 1732437191047Writing region close 
event to WAL at 1732437191048 (+1 ms)Closed at 1732437191048 2024-11-24T08:33:11,049 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:33:11,049 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T08:33:11,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T08:33:11,050 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:33:11,051 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T08:33:11,077 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(746): ClusterId : 996bffb8-8029-4b34-84a0-151103e3f92e 2024-11-24T08:33:11,077 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T08:33:11,079 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T08:33:11,079 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T08:33:11,081 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T08:33:11,081 DEBUG [RS:0;30c28c82771d:43215 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@311bb481, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=30c28c82771d/172.17.0.2:0 2024-11-24T08:33:11,093 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;30c28c82771d:43215 2024-11-24T08:33:11,093 INFO [RS:0;30c28c82771d:43215 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T08:33:11,093 INFO [RS:0;30c28c82771d:43215 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T08:33:11,093 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T08:33:11,094 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(2659): reportForDuty to master=30c28c82771d,34897,1732437190815 with port=43215, startcode=1732437190863 2024-11-24T08:33:11,094 DEBUG [RS:0;30c28c82771d:43215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T08:33:11,096 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32779, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T08:33:11,097 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34897 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,097 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34897 {}] master.ServerManager(517): Registering regionserver=30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,098 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad 2024-11-24T08:33:11,098 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36285 2024-11-24T08:33:11,098 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T08:33:11,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:33:11,101 DEBUG [RS:0;30c28c82771d:43215 {}] zookeeper.ZKUtil(111): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,101 WARN [RS:0;30c28c82771d:43215 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T08:33:11,101 INFO [RS:0;30c28c82771d:43215 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:33:11,101 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,102 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [30c28c82771d,43215,1732437190863] 2024-11-24T08:33:11,105 INFO [RS:0;30c28c82771d:43215 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T08:33:11,106 INFO [RS:0;30c28c82771d:43215 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T08:33:11,107 INFO [RS:0;30c28c82771d:43215 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T08:33:11,107 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T08:33:11,107 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/30c28c82771d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/30c28c82771d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:33:11,108 DEBUG [RS:0;30c28c82771d:43215 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/30c28c82771d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,108 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,43215,1732437190863-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:33:11,123 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T08:33:11,124 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,43215,1732437190863-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,124 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,124 INFO [RS:0;30c28c82771d:43215 {}] regionserver.Replication(171): 30c28c82771d,43215,1732437190863 started 2024-11-24T08:33:11,138 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,138 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1482): Serving as 30c28c82771d,43215,1732437190863, RpcServer on 30c28c82771d/172.17.0.2:43215, sessionid=0x101491b93200001 2024-11-24T08:33:11,139 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T08:33:11,139 DEBUG [RS:0;30c28c82771d:43215 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,139 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,43215,1732437190863' 2024-11-24T08:33:11,139 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T08:33:11,139 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T08:33:11,140 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T08:33:11,140 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T08:33:11,140 DEBUG [RS:0;30c28c82771d:43215 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,140 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '30c28c82771d,43215,1732437190863' 2024-11-24T08:33:11,140 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T08:33:11,140 DEBUG 
[RS:0;30c28c82771d:43215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T08:33:11,140 DEBUG [RS:0;30c28c82771d:43215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T08:33:11,140 INFO [RS:0;30c28c82771d:43215 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T08:33:11,140 INFO [RS:0;30c28c82771d:43215 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T08:33:11,201 WARN [30c28c82771d:34897 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T08:33:11,242 INFO [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C43215%2C1732437190863, suffix=, logDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/30c28c82771d,43215,1732437190863, archiveDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs, maxLogs=32 2024-11-24T08:33:11,243 INFO [RS:0;30c28c82771d:43215 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C43215%2C1732437190863.1732437191243 2024-11-24T08:33:11,248 INFO [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/30c28c82771d,43215,1732437190863/30c28c82771d%2C43215%2C1732437190863.1732437191243 2024-11-24T08:33:11,249 DEBUG [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36337:36337),(127.0.0.1/127.0.0.1:34047:34047)] 2024-11-24T08:33:11,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,34635,1732437001369/30c28c82771d%2C34635%2C1732437001369.1732437001555 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T08:33:11,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45383/user/jenkins/test-data/78a65cad-382c-e82a-49e8-5b766fa23d40/WALs/30c28c82771d,45841,1732437000220/30c28c82771d%2C45841%2C1732437000220.meta.1732437001205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T08:33:11,452 DEBUG [30c28c82771d:34897 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T08:33:11,452 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,453 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,43215,1732437190863, state=OPENING 2024-11-24T08:33:11,455 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T08:33:11,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,456 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:33:11,456 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T08:33:11,456 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:33:11,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,43215,1732437190863}] 2024-11-24T08:33:11,609 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T08:33:11,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49753, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T08:33:11,614 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T08:33:11,614 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:33:11,615 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=30c28c82771d%2C43215%2C1732437190863.meta, suffix=.meta, logDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/30c28c82771d,43215,1732437190863, archiveDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs, maxLogs=32 2024-11-24T08:33:11,615 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 30c28c82771d%2C43215%2C1732437190863.meta.1732437191615.meta 2024-11-24T08:33:11,620 INFO 
[RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/30c28c82771d,43215,1732437190863/30c28c82771d%2C43215%2C1732437190863.meta.1732437191615.meta 2024-11-24T08:33:11,624 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34047:34047),(127.0.0.1/127.0.0.1:36337:36337)] 2024-11-24T08:33:11,625 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T08:33:11,625 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T08:33:11,625 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T08:33:11,625 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T08:33:11,626 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T08:33:11,626 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T08:33:11,626 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T08:33:11,626 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T08:33:11,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T08:33:11,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T08:33:11,627 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T08:33:11,628 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T08:33:11,628 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,629 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,629 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T08:33:11,629 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T08:33:11,629 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,630 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T08:33:11,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T08:33:11,630 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T08:33:11,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T08:33:11,631 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T08:33:11,631 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740 2024-11-24T08:33:11,632 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740 2024-11-24T08:33:11,633 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T08:33:11,633 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T08:33:11,633 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T08:33:11,634 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T08:33:11,635 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728761, jitterRate=-0.07333303987979889}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T08:33:11,635 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T08:33:11,635 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732437191626Writing region info on filesystem at 1732437191626Initializing all the Stores at 1732437191626Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437191626Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437191627 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732437191627Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732437191627Cleaning up temporary data from old regions at 1732437191633 (+6 ms)Running coprocessor post-open hooks at 1732437191635 (+2 ms)Region opened successfully at 1732437191635 2024-11-24T08:33:11,636 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732437191608 2024-11-24T08:33:11,638 DEBUG [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T08:33:11,638 INFO [RS_OPEN_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T08:33:11,639 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,639 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 30c28c82771d,43215,1732437190863, state=OPEN 2024-11-24T08:33:11,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:33:11,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T08:33:11,645 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,645 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:33:11,645 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T08:33:11,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T08:33:11,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=30c28c82771d,43215,1732437190863 in 189 msec 2024-11-24T08:33:11,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T08:33:11,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 598 msec 2024-11-24T08:33:11,649 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T08:33:11,649 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T08:33:11,650 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:33:11,650 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,43215,1732437190863, seqNum=-1] 2024-11-24T08:33:11,651 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:33:11,652 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52379, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:33:11,656 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 644 msec 2024-11-24T08:33:11,656 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732437191656, completionTime=-1 2024-11-24T08:33:11,657 INFO 
[master/30c28c82771d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T08:33:11,657 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T08:33:11,658 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T08:33:11,658 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732437251658 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732437311658 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34897,1732437190815-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34897,1732437190815-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34897,1732437190815-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-30c28c82771d:34897, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,659 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,661 DEBUG [master/30c28c82771d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34897,1732437190815-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T08:33:11,663 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34897,1732437190815-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T08:33:11,665 DEBUG [master/30c28c82771d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T08:33:11,665 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T08:33:11,665 INFO [master/30c28c82771d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=30c28c82771d,34897,1732437190815-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T08:33:11,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4baed4c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:33:11,677 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 30c28c82771d,34897,-1 for getting cluster id 2024-11-24T08:33:11,677 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T08:33:11,678 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '996bffb8-8029-4b34-84a0-151103e3f92e' 2024-11-24T08:33:11,679 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T08:33:11,679 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "996bffb8-8029-4b34-84a0-151103e3f92e" 2024-11-24T08:33:11,679 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51022f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:33:11,679 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [30c28c82771d,34897,-1] 2024-11-24T08:33:11,679 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T08:33:11,679 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:11,680 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45786, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T08:33:11,681 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f4fc7f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T08:33:11,681 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T08:33:11,682 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=30c28c82771d,43215,1732437190863, seqNum=-1] 2024-11-24T08:33:11,682 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T08:33:11,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57140, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T08:33:11,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=30c28c82771d,34897,1732437190815 2024-11-24T08:33:11,684 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T08:33:11,686 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T08:33:11,687 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T08:33:11,688 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/test.com,8080,1, archiveDir=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs, maxLogs=32 2024-11-24T08:33:11,689 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732437191689 2024-11-24T08:33:11,693 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732437191689 2024-11-24T08:33:11,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36337:36337),(127.0.0.1/127.0.0.1:34047:34047)] 2024-11-24T08:33:11,694 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732437191694 2024-11-24T08:33:11,698 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,698 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,698 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,698 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,698 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,699 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732437191689 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732437191694 2024-11-24T08:33:11,699 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36337:36337),(127.0.0.1/127.0.0.1:34047:34047)] 2024-11-24T08:33:11,699 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732437191689 is not closed yet, will try archiving it next time 2024-11-24T08:33:11,700 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,700 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,700 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741835_1011 (size=93) 2024-11-24T08:33:11,700 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741835_1011 (size=93) 2024-11-24T08:33:11,700 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,701 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/WALs/test.com,8080,1/test.com%2C8080%2C1.1732437191689 to hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs/test.com%2C8080%2C1.1732437191689 2024-11-24T08:33:11,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741836_1012 (size=93) 2024-11-24T08:33:11,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741836_1012 (size=93) 2024-11-24T08:33:11,704 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs 2024-11-24T08:33:11,704 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732437191694) 2024-11-24T08:33:11,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T08:33:11,704 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T08:33:11,704 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:33:11,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:11,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:11,704 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T08:33:11,704 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T08:33:11,704 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1471200900, stopped=false 2024-11-24T08:33:11,704 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=30c28c82771d,34897,1732437190815 2024-11-24T08:33:11,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:33:11,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T08:33:11,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,706 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:33:11,706 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T08:33:11,706 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:33:11,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:11,706 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '30c28c82771d,43215,1732437190863' ***** 2024-11-24T08:33:11,706 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T08:33:11,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:33:11,706 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T08:33:11,707 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T08:33:11,707 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(959): stopping server 30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;30c28c82771d:43215. 
2024-11-24T08:33:11,707 DEBUG [RS:0;30c28c82771d:43215 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T08:33:11,707 DEBUG [RS:0;30c28c82771d:43215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T08:33:11,707 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T08:33:11,707 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T08:33:11,707 DEBUG [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T08:33:11,707 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T08:33:11,707 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T08:33:11,707 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T08:33:11,707 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T08:33:11,707 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T08:33:11,708 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T08:33:11,722 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/.tmp/ns/9a6867e3e3df45b9981a8909e850cc77 is 43, key is default/ns:d/1732437191652/Put/seqid=0 2024-11-24T08:33:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741837_1013 (size=5153) 2024-11-24T08:33:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741837_1013 (size=5153) 2024-11-24T08:33:11,727 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/.tmp/ns/9a6867e3e3df45b9981a8909e850cc77 2024-11-24T08:33:11,731 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/.tmp/ns/9a6867e3e3df45b9981a8909e850cc77 as hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/ns/9a6867e3e3df45b9981a8909e850cc77 2024-11-24T08:33:11,735 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/ns/9a6867e3e3df45b9981a8909e850cc77, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T08:33:11,736 INFO 
[RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-24T08:33:11,739 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T08:33:11,740 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T08:33:11,740 INFO [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T08:33:11,740 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732437191707Running coprocessor pre-close hooks at 1732437191707Disabling compacts and flushes for region at 1732437191707Disabling writes for close at 1732437191707Obtaining lock to block concurrent updates at 1732437191708 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732437191708Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732437191708Flushing stores of hbase:meta,,1.1588230740 at 1732437191708Flushing 1588230740/ns: creating writer at 1732437191708Flushing 1588230740/ns: appending metadata at 1732437191722 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732437191722Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bb58575: reopening flushed file at 1732437191731 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1732437191736 (+5 ms)Writing region close event to WAL at 1732437191736Running coprocessor post-close hooks at 1732437191740 (+4 ms)Closed at 1732437191740 2024-11-24T08:33:11,740 DEBUG [RS_CLOSE_META-regionserver/30c28c82771d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T08:33:11,907 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(976): stopping server 30c28c82771d,43215,1732437190863; all regions closed. 
2024-11-24T08:33:11,908 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,908 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,908 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,908 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,908 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:33:11,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741834_1010 (size=1152) 2024-11-24T08:33:11,912 DEBUG [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs 2024-11-24T08:33:11,912 INFO [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C43215%2C1732437190863.meta:.meta(num 1732437191615) 2024-11-24T08:33:11,913 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,913 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,913 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,913 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,913 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:11,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:33:11,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741833_1009 (size=93) 2024-11-24T08:33:11,916 DEBUG [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/oldWALs 2024-11-24T08:33:11,916 INFO [RS:0;30c28c82771d:43215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 30c28c82771d%2C43215%2C1732437190863:(num 1732437191243) 2024-11-24T08:33:11,916 DEBUG [RS:0;30c28c82771d:43215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T08:33:11,916 INFO [RS:0;30c28c82771d:43215 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T08:33:11,916 INFO [RS:0;30c28c82771d:43215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:33:11,917 INFO [RS:0;30c28c82771d:43215 {}] hbase.ChoreService(370): Chore service for: regionserver/30c28c82771d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T08:33:11,917 INFO [RS:0;30c28c82771d:43215 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:33:11,917 INFO [regionserver/30c28c82771d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T08:33:11,917 INFO [RS:0;30c28c82771d:43215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43215 2024-11-24T08:33:11,919 INFO [RS:0;30c28c82771d:43215 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:33:11,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T08:33:11,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/30c28c82771d,43215,1732437190863 2024-11-24T08:33:11,920 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [30c28c82771d,43215,1732437190863] 2024-11-24T08:33:11,922 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/30c28c82771d,43215,1732437190863 already deleted, retry=false 2024-11-24T08:33:11,922 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 30c28c82771d,43215,1732437190863 expired; onlineServers=0 2024-11-24T08:33:11,922 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '30c28c82771d,34897,1732437190815' ***** 2024-11-24T08:33:11,922 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T08:33:11,923 DEBUG [M:0;30c28c82771d:34897 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T08:33:11,923 DEBUG [M:0;30c28c82771d:34897 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T08:33:11,923 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T08:33:11,923 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437191016 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.large.0-1732437191016,5,FailOnTimeoutGroup] 2024-11-24T08:33:11,923 DEBUG [master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437191016 {}] cleaner.HFileCleaner(306): Exit Thread[master/30c28c82771d:0:becomeActiveMaster-HFileCleaner.small.0-1732437191016,5,FailOnTimeoutGroup] 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] hbase.ChoreService(370): Chore service for: master/30c28c82771d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T08:33:11,923 DEBUG [M:0;30c28c82771d:34897 {}] master.HMaster(1795): Stopping service threads 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T08:33:11,923 INFO [M:0;30c28c82771d:34897 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T08:33:11,923 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T08:33:11,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T08:33:11,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T08:33:11,924 DEBUG [M:0;30c28c82771d:34897 {}] zookeeper.ZKUtil(347): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T08:33:11,924 WARN [M:0;30c28c82771d:34897 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T08:33:11,924 INFO [M:0;30c28c82771d:34897 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/.lastflushedseqids 2024-11-24T08:33:11,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741838_1014 (size=99) 2024-11-24T08:33:11,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741838_1014 (size=99) 2024-11-24T08:33:11,931 INFO [M:0;30c28c82771d:34897 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T08:33:11,931 INFO [M:0;30c28c82771d:34897 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T08:33:11,931 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T08:33:11,931 INFO [M:0;30c28c82771d:34897 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:11,931 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:11,931 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T08:33:11,931 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:11,931 INFO [M:0;30c28c82771d:34897 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T08:33:11,946 DEBUG [M:0;30c28c82771d:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea808ef784ad4ab88548c2d57b5ed1cd is 82, key is hbase:meta,,1/info:regioninfo/1732437191639/Put/seqid=0 2024-11-24T08:33:11,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741839_1015 (size=5672) 2024-11-24T08:33:11,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741839_1015 (size=5672) 2024-11-24T08:33:11,951 INFO [M:0;30c28c82771d:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea808ef784ad4ab88548c2d57b5ed1cd 2024-11-24T08:33:11,968 DEBUG [M:0;30c28c82771d:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/469135b89d834555a911a96f3ebc4a29 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732437191656/Put/seqid=0 2024-11-24T08:33:11,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741840_1016 (size=5275) 2024-11-24T08:33:11,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741840_1016 (size=5275) 2024-11-24T08:33:11,973 INFO [M:0;30c28c82771d:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/469135b89d834555a911a96f3ebc4a29 2024-11-24T08:33:11,991 DEBUG [M:0;30c28c82771d:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8fb5e080ba8746a9ac33a0abffdd5a7f is 69, key is 30c28c82771d,43215,1732437190863/rs:state/1732437191097/Put/seqid=0 2024-11-24T08:33:11,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741841_1017 (size=5156) 2024-11-24T08:33:11,996 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741841_1017 (size=5156) 2024-11-24T08:33:11,996 INFO [M:0;30c28c82771d:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8fb5e080ba8746a9ac33a0abffdd5a7f 2024-11-24T08:33:12,012 DEBUG [M:0;30c28c82771d:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e5f94209df6f47caa8f44ab4c97ca697 is 52, key is load_balancer_on/state:d/1732437191686/Put/seqid=0 2024-11-24T08:33:12,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741842_1018 (size=5056) 2024-11-24T08:33:12,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741842_1018 (size=5056) 2024-11-24T08:33:12,016 INFO [M:0;30c28c82771d:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e5f94209df6f47caa8f44ab4c97ca697 2024-11-24T08:33:12,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:12,020 INFO [RS:0;30c28c82771d:43215 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:33:12,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43215-0x101491b93200001, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:12,020 INFO [RS:0;30c28c82771d:43215 {}] regionserver.HRegionServer(1031): Exiting; stopping=30c28c82771d,43215,1732437190863; zookeeper connection closed. 
2024-11-24T08:33:12,020 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea808ef784ad4ab88548c2d57b5ed1cd as hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea808ef784ad4ab88548c2d57b5ed1cd 2024-11-24T08:33:12,020 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63a9d19 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63a9d19 2024-11-24T08:33:12,021 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T08:33:12,024 INFO [M:0;30c28c82771d:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea808ef784ad4ab88548c2d57b5ed1cd, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T08:33:12,025 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/469135b89d834555a911a96f3ebc4a29 as hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/469135b89d834555a911a96f3ebc4a29 2024-11-24T08:33:12,028 INFO [M:0;30c28c82771d:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/469135b89d834555a911a96f3ebc4a29, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T08:33:12,029 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8fb5e080ba8746a9ac33a0abffdd5a7f as hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8fb5e080ba8746a9ac33a0abffdd5a7f 2024-11-24T08:33:12,033 INFO [M:0;30c28c82771d:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8fb5e080ba8746a9ac33a0abffdd5a7f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T08:33:12,033 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e5f94209df6f47caa8f44ab4c97ca697 as hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e5f94209df6f47caa8f44ab4c97ca697 2024-11-24T08:33:12,037 INFO [M:0;30c28c82771d:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36285/user/jenkins/test-data/64388312-678b-16b0-085a-3a9349b26bad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e5f94209df6f47caa8f44ab4c97ca697, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T08:33:12,037 INFO [M:0;30c28c82771d:34897 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false 2024-11-24T08:33:12,039 INFO [M:0;30c28c82771d:34897 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T08:33:12,039 DEBUG [M:0;30c28c82771d:34897 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732437191931Disabling compacts and flushes for region at 1732437191931Disabling writes for close at 1732437191931Obtaining lock to block concurrent updates at 1732437191931Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732437191931Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732437191932 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732437191932Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732437191932Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732437191946 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732437191946Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732437191954 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732437191968 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732437191968Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732437191976 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732437191991 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732437191991Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732437191999 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732437192012 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732437192012Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b67f2bb: reopening flushed file at 1732437192020 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42f59876: reopening flushed file at 1732437192024 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ec39bb7: reopening flushed file at 1732437192028 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@336dc53a: reopening flushed file at 1732437192033 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false at 1732437192037 (+4 ms)Writing region close event to WAL at 1732437192039 (+2 ms)Closed at 1732437192039 2024-11-24T08:33:12,039 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:12,039 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:12,039 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:12,040 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:12,040 INFO 
[sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T08:33:12,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45849 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:33:12,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34369 is added to blk_1073741830_1006 (size=10311) 2024-11-24T08:33:12,042 INFO [M:0;30c28c82771d:34897 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T08:33:12,042 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T08:33:12,042 INFO [M:0;30c28c82771d:34897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34897 2024-11-24T08:33:12,042 INFO [M:0;30c28c82771d:34897 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T08:33:12,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:12,144 INFO [M:0;30c28c82771d:34897 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T08:33:12,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101491b93200000, quorum=127.0.0.1:58437, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T08:33:12,146 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40d0e54c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:33:12,147 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@372003a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:33:12,147 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:33:12,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186f146f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:33:12,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@644054b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir/,STOPPED} 2024-11-24T08:33:12,148 WARN [BP-488763426-172.17.0.2-1732437190157 heartbeating to localhost/127.0.0.1:36285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:33:12,148 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:33:12,148 WARN [BP-488763426-172.17.0.2-1732437190157 heartbeating to localhost/127.0.0.1:36285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-488763426-172.17.0.2-1732437190157 (Datanode Uuid 53e5c375-1367-4008-93b0-1e6c5a41e744) service to localhost/127.0.0.1:36285 2024-11-24T08:33:12,148 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:33:12,149 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data3/current/BP-488763426-172.17.0.2-1732437190157 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:12,149 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data4/current/BP-488763426-172.17.0.2-1732437190157 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:12,149 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:33:12,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@223a801d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T08:33:12,151 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8035060{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:33:12,151 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:33:12,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49428dc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:33:12,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56836339{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir/,STOPPED} 2024-11-24T08:33:12,152 WARN [BP-488763426-172.17.0.2-1732437190157 heartbeating to localhost/127.0.0.1:36285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T08:33:12,152 WARN [BP-488763426-172.17.0.2-1732437190157 heartbeating to localhost/127.0.0.1:36285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-488763426-172.17.0.2-1732437190157 (Datanode Uuid 15a6526b-f705-4e05-aee2-db14731c77fd) service to localhost/127.0.0.1:36285 2024-11-24T08:33:12,152 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T08:33:12,152 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T08:33:12,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data1/current/BP-488763426-172.17.0.2-1732437190157 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:12,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/cluster_d841ac18-6f81-863b-4c84-bbd270c9823a/data/data2/current/BP-488763426-172.17.0.2-1732437190157 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T08:33:12,153 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T08:33:12,158 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6dcbbc83{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T08:33:12,159 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1802ed83{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T08:33:12,159 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T08:33:12,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cdb81da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T08:33:12,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2394ff19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9bb5220b-ead7-6331-1f7d-6308548cdf57/hadoop.log.dir/,STOPPED} 2024-11-24T08:33:12,165 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T08:33:12,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T08:33:12,191 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 232) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:36285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36285 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36285 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36285 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=21 (was 21), ProcessCount=11 (was 11), AvailableMemoryMB=6259 (was 6269)