2024-11-13 11:25:58,724 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-13 11:25:58,745 main DEBUG Took 0.017479 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-13 11:25:58,745 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-13 11:25:58,746 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-13 11:25:58,747 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-13 11:25:58,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,757 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-13 11:25:58,778 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,780 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,781 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,781 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,782 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,782 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,783 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,783 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,784 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,784 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,785 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,785 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,786 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,786 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-13 11:25:58,787 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,787 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,787 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,788 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,789 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,790 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,790 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,790 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 11:25:58,791 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,792 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-13 11:25:58,794 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 11:25:58,795 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-13 11:25:58,798 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-13 11:25:58,798 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-13 11:25:58,800 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-13 11:25:58,801 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-13 11:25:58,810 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-13 11:25:58,813 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-13 11:25:58,815 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-13 11:25:58,815 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-13 11:25:58,816 main DEBUG createAppenders(={Console}) 2024-11-13 11:25:58,817 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-13 11:25:58,817 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-13 11:25:58,818 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-13 11:25:58,818 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-13 11:25:58,818 main DEBUG OutputStream closed 2024-11-13 11:25:58,819 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-13 11:25:58,819 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-13 11:25:58,819 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-13 11:25:58,909 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-13 11:25:58,912 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-13 11:25:58,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-13 11:25:58,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-13 11:25:58,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-13 11:25:58,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-13 11:25:58,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-13 11:25:58,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-13 11:25:58,919 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-13 11:25:58,919 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-13 11:25:58,920 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-13 11:25:58,920 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-13 11:25:58,921 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-13 11:25:58,921 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-13 11:25:58,922 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-13 11:25:58,922 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-13 11:25:58,922 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-13 11:25:58,924 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-13 11:25:58,927 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13 11:25:58,927 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-13 11:25:58,928 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-13 11:25:58,929 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-13T11:25:59,223 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d 2024-11-13 11:25:59,227 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-13 11:25:59,227 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-13T11:25:59,239 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-13T11:25:59,292 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=444, ProcessCount=11, AvailableMemoryMB=4107 2024-11-13T11:25:59,297 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:25:59,321 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f, deleteOnExit=true 2024-11-13T11:25:59,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:25:59,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/test.cache.data in system properties and HBase conf 2024-11-13T11:25:59,323 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:25:59,324 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:25:59,324 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:25:59,325 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:25:59,325 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:25:59,442 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-13T11:25:59,548 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T11:25:59,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:25:59,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:25:59,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:25:59,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:25:59,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:25:59,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:25:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:25:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:25:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T11:25:59,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:25:59,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:25:59,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:25:59,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:25:59,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:26:00,101 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:26:00,472 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T11:26:00,575 INFO [Time-limited test {}] log.Log(170): Logging initialized @2734ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-13T11:26:00,673 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:26:00,750 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:26:00,773 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:26:00,773 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:26:00,775 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:26:00,794 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:26:00,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:26:00,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:26:01,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/java.io.tmpdir/jetty-localhost-39905-hadoop-hdfs-3_4_1-tests_jar-_-any-5828237778677296322/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:26:01,018 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:39905} 2024-11-13T11:26:01,019 INFO [Time-limited test {}] server.Server(415): Started @3179ms 2024-11-13T11:26:01,056 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:26:01,491 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:26:01,505 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:26:01,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:26:01,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:26:01,507 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:26:01,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c88bf4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:26:01,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@621a7cbc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:26:01,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11e88411{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/java.io.tmpdir/jetty-localhost-45931-hadoop-hdfs-3_4_1-tests_jar-_-any-1030825771919760795/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:26:01,651 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75423500{HTTP/1.1, (http/1.1)}{localhost:45931} 2024-11-13T11:26:01,652 INFO [Time-limited test {}] server.Server(415): Started @3812ms 2024-11-13T11:26:01,739 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:26:01,892 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:26:01,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:26:01,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:26:01,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:26:01,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:26:01,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43794ae7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:26:01,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35f1cf70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:26:02,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@590b36b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/java.io.tmpdir/jetty-localhost-39465-hadoop-hdfs-3_4_1-tests_jar-_-any-14803601259821572295/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:26:02,054 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@237fc06a{HTTP/1.1, (http/1.1)}{localhost:39465} 2024-11-13T11:26:02,055 INFO [Time-limited test {}] server.Server(415): Started @4215ms 2024-11-13T11:26:02,058 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T11:26:02,236 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data3/current/BP-92278369-172.17.0.2-1731497160224/current, will proceed with Du for space computation calculation, 2024-11-13T11:26:02,240 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data4/current/BP-92278369-172.17.0.2-1731497160224/current, will proceed with Du for space computation calculation, 2024-11-13T11:26:02,250 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data1/current/BP-92278369-172.17.0.2-1731497160224/current, will proceed with Du for space computation calculation, 2024-11-13T11:26:02,270 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data2/current/BP-92278369-172.17.0.2-1731497160224/current, will proceed with Du for space computation calculation, 2024-11-13T11:26:02,401 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:26:02,403 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:26:02,491 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9e5a00f406a92b5 with lease ID 0x70753bbfd8415567: Processing first storage report for DS-02669440-8167-4429-b516-6b6e405a44a1 from datanode DatanodeRegistration(127.0.0.1:41663, datanodeUuid=576c2c7c-6912-4166-9b93-f20d35dbf493, infoPort=40867, infoSecurePort=0, ipcPort=46039, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224) 2024-11-13T11:26:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9e5a00f406a92b5 with lease ID 0x70753bbfd8415567: from storage DS-02669440-8167-4429-b516-6b6e405a44a1 node DatanodeRegistration(127.0.0.1:41663, datanodeUuid=576c2c7c-6912-4166-9b93-f20d35dbf493, infoPort=40867, infoSecurePort=0, ipcPort=46039, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T11:26:02,493 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca1aefd3b9a09b06 with lease ID 0x70753bbfd8415568: Processing first storage report for DS-602ddc0b-8347-4402-b835-e64ddb0125fa from datanode DatanodeRegistration(127.0.0.1:35943, datanodeUuid=c419622e-e76f-41e1-a5ac-b914064dd84a, infoPort=39697, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224) 2024-11-13T11:26:02,493 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca1aefd3b9a09b06 with lease ID 0x70753bbfd8415568: from storage DS-602ddc0b-8347-4402-b835-e64ddb0125fa node DatanodeRegistration(127.0.0.1:35943, datanodeUuid=c419622e-e76f-41e1-a5ac-b914064dd84a, infoPort=39697, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:26:02,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9e5a00f406a92b5 with lease ID 0x70753bbfd8415567: Processing first storage report for DS-915c07e4-84d6-40ef-b306-613dbadae26d from datanode DatanodeRegistration(127.0.0.1:41663, datanodeUuid=576c2c7c-6912-4166-9b93-f20d35dbf493, infoPort=40867, infoSecurePort=0, ipcPort=46039, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224) 2024-11-13T11:26:02,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9e5a00f406a92b5 with lease ID 0x70753bbfd8415567: from storage DS-915c07e4-84d6-40ef-b306-613dbadae26d node DatanodeRegistration(127.0.0.1:41663, datanodeUuid=576c2c7c-6912-4166-9b93-f20d35dbf493, infoPort=40867, infoSecurePort=0, ipcPort=46039, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T11:26:02,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca1aefd3b9a09b06 with lease ID 0x70753bbfd8415568: Processing first storage report for DS-101d8f4a-e7c4-4571-810d-2abf9b786aee from datanode DatanodeRegistration(127.0.0.1:35943, datanodeUuid=c419622e-e76f-41e1-a5ac-b914064dd84a, infoPort=39697, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224) 2024-11-13T11:26:02,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xca1aefd3b9a09b06 with lease ID 0x70753bbfd8415568: from storage DS-101d8f4a-e7c4-4571-810d-2abf9b786aee node DatanodeRegistration(127.0.0.1:35943, datanodeUuid=c419622e-e76f-41e1-a5ac-b914064dd84a, infoPort=39697, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=1014882433;c=1731497160224), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:26:02,553 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d 2024-11-13T11:26:02,660 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/zookeeper_0, clientPort=49981, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:26:02,679 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49981 2024-11-13T11:26:02,692 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:02,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:02,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:26:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:26:03,392 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e with version=8 2024-11-13T11:26:03,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:26:03,487 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-13T11:26:03,724 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:26:03,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:26:03,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:26:03,739 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:26:03,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:26:03,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:26:03,902 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:26:03,971 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-13T11:26:03,984 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-13T11:26:03,988 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:26:04,014 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 82065 (auto-detected) 2024-11-13T11:26:04,015 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-13T11:26:04,036 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40685 2024-11-13T11:26:04,059 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40685 connecting to ZooKeeper ensemble=127.0.0.1:49981 2024-11-13T11:26:04,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406850x0, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:26:04,093 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40685-0x10038d553fb0000 connected 2024-11-13T11:26:04,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:04,125 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:04,134 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:26:04,140 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e, hbase.cluster.distributed=false 2024-11-13T11:26:04,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:26:04,178 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40685 2024-11-13T11:26:04,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40685 2024-11-13T11:26:04,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40685 2024-11-13T11:26:04,189 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40685 2024-11-13T11:26:04,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40685 2024-11-13T11:26:04,326 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:26:04,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:26:04,332 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:26:04,332 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:26:04,332 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:26:04,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:26:04,337 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:26:04,341 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:26:04,343 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35781 2024-11-13T11:26:04,346 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35781 connecting to ZooKeeper ensemble=127.0.0.1:49981 2024-11-13T11:26:04,348 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:04,354 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:04,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357810x0, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:26:04,373 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35781-0x10038d553fb0001 connected 2024-11-13T11:26:04,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:26:04,385 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:26:04,401 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:26:04,404 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:26:04,412 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:26:04,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35781 2024-11-13T11:26:04,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35781 2024-11-13T11:26:04,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35781 2024-11-13T11:26:04,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35781 2024-11-13T11:26:04,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35781 2024-11-13T11:26:04,444 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:40685 2024-11-13T11:26:04,446 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,40685,1731497163541 2024-11-13T11:26:04,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:26:04,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:26:04,458 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,40685,1731497163541 2024-11-13T11:26:04,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:26:04,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:04,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-13T11:26:04,481 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:26:04,486 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,40685,1731497163541 from backup master directory 2024-11-13T11:26:04,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,40685,1731497163541 2024-11-13T11:26:04,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:26:04,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:26:04,489 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:26:04,490 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,40685,1731497163541 2024-11-13T11:26:04,492 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-13T11:26:04,493 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-13T11:26:04,569 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase.id] with ID: 48c70c1e-8c3b-4e8c-b3d3-d775f9b370ad 2024-11-13T11:26:04,569 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/.tmp/hbase.id 2024-11-13T11:26:04,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:26:04,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:26:04,598 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/.tmp/hbase.id]:[hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase.id] 2024-11-13T11:26:04,661 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:04,668 INFO 
[master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T11:26:04,693 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms. 2024-11-13T11:26:04,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:04,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:26:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:26:04,755 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:26:04,758 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:26:04,765 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:26:04,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:26:04,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:26:04,836 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store 2024-11-13T11:26:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:26:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:26:04,865 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-13T11:26:04,868 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:26:04,870 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:26:04,870 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:26:04,870 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:26:04,872 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:26:04,873 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T11:26:04,873 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:26:04,875 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497164870Disabling compacts and flushes for region at 1731497164870Disabling writes for close at 1731497164872 (+2 ms)Writing region close event to WAL at 1731497164873 (+1 ms)Closed at 1731497164873 2024-11-13T11:26:04,878 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/.initializing 2024-11-13T11:26:04,878 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/WALs/7bf281cf3991,40685,1731497163541 2024-11-13T11:26:04,905 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C40685%2C1731497163541, suffix=, logDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/WALs/7bf281cf3991,40685,1731497163541, archiveDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/oldWALs, maxLogs=10 2024-11-13T11:26:04,913 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C40685%2C1731497163541.1731497164909 2024-11-13T11:26:04,939 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/WALs/7bf281cf3991,40685,1731497163541/7bf281cf3991%2C40685%2C1731497163541.1731497164909 2024-11-13T11:26:04,953 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:26:04,959 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:26:04,960 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:26:04,964 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:04,966 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,047 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:26:05,052 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:05,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:26:05,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:26:05,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:26:05,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:26:05,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:26:05,067 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:26:05,068 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,078 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,079 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,084 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,084 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,088 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:26:05,091 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:26:05,096 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:26:05,099 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706916, jitterRate=-0.10111027956008911}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:26:05,106 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497164984Initializing all the Stores at 1731497164987 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497164988 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497164992 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497164993 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497164993Cleaning up temporary data from old regions at 1731497165084 (+91 ms)Region opened successfully at 1731497165106 (+22 ms) 2024-11-13T11:26:05,107 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:26:05,144 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7eb41786, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:26:05,177 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:26:05,190 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:26:05,190 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:26:05,194 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:26:05,197 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 3 msec 2024-11-13T11:26:05,202 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-13T11:26:05,202 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:26:05,236 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:26:05,247 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:26:05,249 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:26:05,252 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:26:05,254 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:26:05,255 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:26:05,258 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:26:05,262 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:26:05,264 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:26:05,266 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:26:05,267 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:26:05,284 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:26:05,285 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:26:05,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:26:05,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:26:05,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:05,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:05,293 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,40685,1731497163541, sessionid=0x10038d553fb0000, setting cluster-up flag (Was=false) 2024-11-13T11:26:05,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:05,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:05,309 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:26:05,311 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,40685,1731497163541 2024-11-13T11:26:05,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:05,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:05,321 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:26:05,323 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,40685,1731497163541 2024-11-13T11:26:05,329 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:26:05,332 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(746): ClusterId : 48c70c1e-8c3b-4e8c-b3d3-d775f9b370ad 2024-11-13T11:26:05,335 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:26:05,340 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:26:05,340 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:26:05,343 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:26:05,343 DEBUG [RS:0;7bf281cf3991:35781 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d32cd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:26:05,358 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:35781 2024-11-13T11:26:05,362 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:26:05,362 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:26:05,362 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(832): About to register with Master. 
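The ZKWatcher lines above give the coordinates a client of this mini cluster would need: quorum=127.0.0.1:49981 and baseZNode=/hbase, and the region server has just logged ClusterId 48c70c1e-8c3b-4e8c-b3d3-d775f9b370ad. A minimal connection sketch, assuming the standard client keys hbase.zookeeper.quorum and zookeeper.znode.parent (neither key name appears in the log itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and base znode as printed by ZKWatcher above (host:port form in the quorum string)
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:49981");
        conf.set("zookeeper.znode.parent", "/hbase");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Should echo the cluster id logged by the region server (48c70c1e-...)
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }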
2024-11-13T11:26:05,365 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,40685,1731497163541 with port=35781, startcode=1731497164277 2024-11-13T11:26:05,378 DEBUG [RS:0;7bf281cf3991:35781 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:26:05,406 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:26:05,418 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:26:05,427 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:26:05,433 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,40685,1731497163541 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:26:05,442 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:26:05,442 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:26:05,442 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:26:05,442 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:26:05,442 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:26:05,442 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,443 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:26:05,443 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,450 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497195450 2024-11-13T11:26:05,450 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52045, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:26:05,452 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:26:05,452 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:26:05,452 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:26:05,453 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:26:05,457 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:26:05,457 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:26:05,458 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:26:05,458 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:26:05,459 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,459 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:26:05,458 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,457 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40685 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T11:26:05,463 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:26:05,465 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:26:05,466 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:26:05,468 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:26:05,468 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:26:05,470 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497165469,5,FailOnTimeoutGroup] 2024-11-13T11:26:05,471 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497165470,5,FailOnTimeoutGroup] 2024-11-13T11:26:05,471 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,471 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:26:05,474 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,475 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
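Two of the messages above advertise features that are off by default and name the key to flip: StoreHotnessProtector ("Set hbase.region.store.parallel.put.limit > 0 to enable") and reopening regions with very high store file reference counts ("Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count"). A sketch of turning both on in a test configuration; the keys are quoted verbatim from the log, the threshold values are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableOptionalProtectionsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // > 0 enables StoreHotnessProtector (limits parallel puts per store under heavy write load)
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        // > 0 lets the master reopen regions whose store file reference count exceeds the threshold
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println("parallel put limit = "
            + conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }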
2024-11-13T11:26:05,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:26:05,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:26:05,479 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:26:05,480 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e 2024-11-13T11:26:05,493 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T11:26:05,493 WARN [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-13T11:26:05,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:26:05,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:26:05,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:26:05,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:26:05,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:26:05,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:05,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:26:05,509 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:26:05,509 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:05,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:26:05,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:26:05,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:05,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:26:05,517 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:26:05,518 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:05,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:05,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:26:05,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740 2024-11-13T11:26:05,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740 2024-11-13T11:26:05,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:26:05,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:26:05,526 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:26:05,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:26:05,538 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:26:05,539 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732778, jitterRate=-0.06822562217712402}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:26:05,542 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497165498Initializing all the Stores at 1731497165500 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497165500Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497165500Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497165500Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497165500Cleaning up temporary data from old regions at 1731497165525 (+25 ms)Region opened successfully at 1731497165542 (+17 ms) 2024-11-13T11:26:05,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:26:05,543 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing 
region hbase:meta,,1.1588230740 2024-11-13T11:26:05,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:26:05,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:26:05,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:26:05,544 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:26:05,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497165542Disabling compacts and flushes for region at 1731497165542Disabling writes for close at 1731497165543 (+1 ms)Writing region close event to WAL at 1731497165544 (+1 ms)Closed at 1731497165544 2024-11-13T11:26:05,548 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:26:05,548 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:26:05,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:26:05,566 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:26:05,569 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:26:05,595 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,40685,1731497163541 with port=35781, startcode=1731497164277 2024-11-13T11:26:05,597 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40685 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,600 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40685 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,607 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e 2024-11-13T11:26:05,607 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42441 2024-11-13T11:26:05,607 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:26:05,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:26:05,611 DEBUG [RS:0;7bf281cf3991:35781 {}] zookeeper.ZKUtil(111): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,612 WARN [RS:0;7bf281cf3991:35781 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:26:05,612 INFO [RS:0;7bf281cf3991:35781 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:26:05,612 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,614 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,35781,1731497164277] 2024-11-13T11:26:05,638 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:26:05,655 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:26:05,662 INFO [RS:0;7bf281cf3991:35781 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:26:05,662 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,665 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:26:05,670 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:26:05,672 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
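The CompactionConfiguration lines repeated for every store above all decode to the same defaults: files [3, 10), ratio 1.2, off-peak ratio 5.0, minimum compact size 128 MB. Assuming the usual hbase.hstore.compaction.* property names for these knobs (the key names are an assumption, not something printed in the log), they would be tuned like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2F); // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0F));
      }
    }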
2024-11-13T11:26:05,672 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,672 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,672 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,672 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,673 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,673 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:26:05,673 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,673 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,673 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,674 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,674 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,674 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:26:05,674 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:26:05,675 DEBUG [RS:0;7bf281cf3991:35781 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:26:05,677 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,678 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,678 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,678 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
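The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is self-consistent: 836 MB is roughly 0.95 x 880 MB, so the low-water mark is 95% of the global limit, and 880 MB would be 40% of a heap of about 2.2 GB. A sketch of the two fractions, assuming the standard key names, which the log does not print:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores (880 MB in this run)
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4F);
        // Low-water mark as a fraction of the limit above: 0.95 * 880 MB ~= 836 MB, matching the log
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95F);
        System.out.println(0.95F * 880 + " MB low mark");
      }
    }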
2024-11-13T11:26:05,678 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,678 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,35781,1731497164277-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:26:05,706 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:26:05,708 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,35781,1731497164277-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,709 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,709 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.Replication(171): 7bf281cf3991,35781,1731497164277 started 2024-11-13T11:26:05,720 WARN [7bf281cf3991:40685 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T11:26:05,728 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:05,728 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,35781,1731497164277, RpcServer on 7bf281cf3991/172.17.0.2:35781, sessionid=0x10038d553fb0001 2024-11-13T11:26:05,729 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:26:05,730 DEBUG [RS:0;7bf281cf3991:35781 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,730 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,35781,1731497164277' 2024-11-13T11:26:05,730 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:26:05,732 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:26:05,734 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:26:05,734 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:26:05,734 DEBUG [RS:0;7bf281cf3991:35781 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,734 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,35781,1731497164277' 2024-11-13T11:26:05,734 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:26:05,735 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:26:05,736 DEBUG [RS:0;7bf281cf3991:35781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:26:05,736 INFO [RS:0;7bf281cf3991:35781 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:26:05,736 INFO [RS:0;7bf281cf3991:35781 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T11:26:05,843 INFO [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C35781%2C1731497164277, suffix=, logDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277, archiveDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs, maxLogs=32 2024-11-13T11:26:05,846 INFO [RS:0;7bf281cf3991:35781 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497165846 2024-11-13T11:26:05,860 INFO [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497165846 2024-11-13T11:26:05,865 DEBUG [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:26:05,973 DEBUG [7bf281cf3991:40685 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:26:05,986 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,35781,1731497164277 2024-11-13T11:26:05,992 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,35781,1731497164277, state=OPENING 2024-11-13T11:26:06,001 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:26:06,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:06,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:26:06,004 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:26:06,004 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:26:06,005 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:26:06,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,35781,1731497164277}] 2024-11-13T11:26:06,183 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:26:06,187 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:41933, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:26:06,201 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:26:06,202 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:26:06,207 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C35781%2C1731497164277.meta, suffix=.meta, logDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277, archiveDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs, maxLogs=32 2024-11-13T11:26:06,209 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.meta.1731497166209.meta 2024-11-13T11:26:06,234 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.meta.1731497166209.meta 2024-11-13T11:26:06,245 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:26:06,248 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:26:06,250 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:26:06,252 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:26:06,258 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
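Both WAL instances above (the region server's default WAL and the .meta WAL for hbase:meta) are created with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal configuration sketch of where those numbers typically come from; the property names are my assumption and should be checked against the HBase release under test, the values simply restate the log line.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names; values restate "blocksize=256 MB, rollsize=128 MB, maxLogs=32".
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // roll size = 0.5 * blocksize = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
                * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("roll size = " + rollSize + " bytes");
    }
}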
2024-11-13T11:26:06,262 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:26:06,263 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:26:06,263 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:26:06,263 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:26:06,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:26:06,267 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:26:06,267 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:06,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:06,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:26:06,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:26:06,270 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:06,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:06,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:26:06,273 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:26:06,273 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:06,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:26:06,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:26:06,276 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:26:06,276 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:06,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
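Each of the four hbase:meta column families (info, ns, rep_barrier, table) gets the same CompactionConfiguration above: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter. A hedged sketch of the tunables those numbers usually map to; the key names are assumptions to verify, the values restate the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period (7 days, in ms)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    }
}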
2024-11-13T11:26:06,278 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:26:06,279 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740 2024-11-13T11:26:06,282 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740 2024-11-13T11:26:06,285 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:26:06,286 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:26:06,287 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:26:06,289 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:26:06,291 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750692, jitterRate=-0.0454464852809906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:26:06,291 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:26:06,292 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497166263Writing region info on filesystem at 1731497166264 (+1 ms)Initializing all the Stores at 1731497166265 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497166265Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497166265Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497166265Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497166266 (+1 ms)Cleaning up temporary data from old regions at 1731497166286 (+20 ms)Running coprocessor post-open hooks at 1731497166291 (+5 ms)Region opened successfully at 1731497166292 (+1 ms) 2024-11-13T11:26:06,298 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497166175 2024-11-13T11:26:06,312 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:26:06,313 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:26:06,315 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,35781,1731497164277 2024-11-13T11:26:06,317 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,35781,1731497164277, state=OPEN 2024-11-13T11:26:06,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:26:06,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:26:06,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:26:06,322 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:26:06,322 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,35781,1731497164277 2024-11-13T11:26:06,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:26:06,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,35781,1731497164277 in 314 msec 2024-11-13T11:26:06,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:26:06,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 776 msec 2024-11-13T11:26:06,341 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:26:06,341 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:26:06,363 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:26:06,364 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,35781,1731497164277, seqNum=-1] 2024-11-13T11:26:06,391 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:26:06,393 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36553, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:26:06,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0530 sec 2024-11-13T11:26:06,418 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497166417, completionTime=-1 2024-11-13T11:26:06,421 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:26:06,421 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:26:06,448 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:26:06,448 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497226448 2024-11-13T11:26:06,448 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497286448 2024-11-13T11:26:06,448 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 26 msec 2024-11-13T11:26:06,451 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,40685,1731497163541-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:06,452 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,40685,1731497163541-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:06,452 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,40685,1731497163541-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:06,453 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:40685, period=300000, unit=MILLISECONDS is enabled. 
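One derivation worth making explicit from the FlushLargeStoresPolicy entry a few lines above: hbase:meta has no hbase.hregion.percolumnfamilyflush.size.lower.bound in its descriptor, so the policy falls back to the memstore flush heap size divided by the number of families. The region has four families (info, ns, rep_barrier, table) and the logged lower bound is flushSizeLowerBound=16777216 bytes = 16 MB, so the region's memstore flush heap size in this run must be 16 MB × 4 = 64 MB; the "(16.0 M)" in the log is exactly 64 MB / 4.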
2024-11-13T11:26:06,454 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:06,454 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:26:06,461 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:26:06,491 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.001sec 2024-11-13T11:26:06,493 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:26:06,495 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:26:06,496 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T11:26:06,497 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:26:06,497 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:26:06,498 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,40685,1731497163541-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:26:06,499 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,40685,1731497163541-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:26:06,514 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:26:06,515 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:26:06,515 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,40685,1731497163541-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
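The chores registered above and earlier (CompactionChecker every 1,000 ms, HbckChore every 3,600,000 ms, RollingUpgradeChore every 10 s, and so on) are simply tasks re-run at a fixed period. Illustration only, using a plain ScheduledExecutorService rather than HBase's ChoreService, with a made-up task body.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        // "period=10, unit=SECONDS", as logged for RollingUpgradeChore above.
        chorePool.scheduleAtFixedRate(
                () -> System.out.println("chore ran at " + System.currentTimeMillis()),
                0, 10, TimeUnit.SECONDS);
    }
}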
2024-11-13T11:26:06,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48cb9719, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:26:06,549 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T11:26:06,549 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T11:26:06,552 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,40685,-1 for getting cluster id 2024-11-13T11:26:06,555 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:26:06,566 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '48c70c1e-8c3b-4e8c-b3d3-d775f9b370ad' 2024-11-13T11:26:06,570 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:26:06,570 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "48c70c1e-8c3b-4e8c-b3d3-d775f9b370ad" 2024-11-13T11:26:06,571 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45bd4981, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:26:06,571 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,40685,-1] 2024-11-13T11:26:06,575 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:26:06,577 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:26:06,579 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49538, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:26:06,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a2b08a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:26:06,583 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:26:06,591 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,35781,1731497164277, seqNum=-1] 2024-11-13T11:26:06,591 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:26:06,594 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39910, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:26:06,616 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=7bf281cf3991,40685,1731497163541 2024-11-13T11:26:06,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:26:06,624 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:26:06,628 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T11:26:06,634 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7bf281cf3991,40685,1731497163541 2024-11-13T11:26:06,637 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@624ab03d 2024-11-13T11:26:06,638 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T11:26:06,640 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T11:26:06,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T11:26:06,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
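The two TableDescriptorChecker warnings above show that the table about to be created uses a deliberately tiny MAX_FILESIZE (786,432 bytes) and MEMSTORE_FLUSHSIZE (8,192 bytes), which is what lets this test force frequent flushes, WAL rolls and splits. A sketch of how such a descriptor could be built with the public client API; the builder methods are the HBase 2/3 client calls as I recall them and should be verified against the version under test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallTableSketch {
    public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setMaxFileSize(786432L)      // matches the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192L)  // matches the MEMSTORE_FLUSHSIZE warning above
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .build();
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            admin.createTable(td);
        }
    }
}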
2024-11-13T11:26:06,646 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:26:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-13T11:26:06,656 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T11:26:06,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-13T11:26:06,659 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:06,662 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T11:26:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:26:06,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741835_1011 (size=389) 2024-11-13T11:26:06,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741835_1011 (size=389) 2024-11-13T11:26:07,147 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bfaa079922789fecd57194b1ee107e5f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e 2024-11-13T11:26:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741836_1012 (size=72) 2024-11-13T11:26:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741836_1012 (size=72) 2024-11-13T11:26:07,160 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:26:07,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bfaa079922789fecd57194b1ee107e5f, disabling compactions & flushes 2024-11-13T11:26:07,160 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:26:07,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:26:07,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. after waiting 0 ms 2024-11-13T11:26:07,160 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:26:07,160 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:26:07,161 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bfaa079922789fecd57194b1ee107e5f: Waiting for close lock at 1731497167160Disabling compacts and flushes for region at 1731497167160Disabling writes for close at 1731497167160Writing region close event to WAL at 1731497167160Closed at 1731497167160 2024-11-13T11:26:07,163 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T11:26:07,169 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731497167163"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497167163"}]},"ts":"1731497167163"} 2024-11-13T11:26:07,174 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-13T11:26:07,177 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T11:26:07,179 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497167177"}]},"ts":"1731497167177"} 2024-11-13T11:26:07,185 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-13T11:26:07,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bfaa079922789fecd57194b1ee107e5f, ASSIGN}] 2024-11-13T11:26:07,191 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bfaa079922789fecd57194b1ee107e5f, ASSIGN 2024-11-13T11:26:07,193 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bfaa079922789fecd57194b1ee107e5f, ASSIGN; state=OFFLINE, location=7bf281cf3991,35781,1731497164277; forceNewPlan=false, retain=false 2024-11-13T11:26:07,346 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bfaa079922789fecd57194b1ee107e5f, regionState=OPENING, regionLocation=7bf281cf3991,35781,1731497164277 2024-11-13T11:26:07,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bfaa079922789fecd57194b1ee107e5f, ASSIGN because future has completed 2024-11-13T11:26:07,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bfaa079922789fecd57194b1ee107e5f, server=7bf281cf3991,35781,1731497164277}] 2024-11-13T11:26:07,516 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 
2024-11-13T11:26:07,516 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bfaa079922789fecd57194b1ee107e5f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:26:07,517 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,517 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:26:07,517 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,517 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,520 INFO [StoreOpener-bfaa079922789fecd57194b1ee107e5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,522 INFO [StoreOpener-bfaa079922789fecd57194b1ee107e5f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfaa079922789fecd57194b1ee107e5f columnFamilyName info 2024-11-13T11:26:07,522 DEBUG [StoreOpener-bfaa079922789fecd57194b1ee107e5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:26:07,523 INFO [StoreOpener-bfaa079922789fecd57194b1ee107e5f-1 {}] regionserver.HStore(327): Store=bfaa079922789fecd57194b1ee107e5f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:26:07,523 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,525 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,525 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,526 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,526 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,529 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,534 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:26:07,535 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bfaa079922789fecd57194b1ee107e5f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744316, jitterRate=-0.05355428159236908}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:26:07,535 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:07,536 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bfaa079922789fecd57194b1ee107e5f: Running coprocessor pre-open hook at 1731497167517Writing region info on filesystem at 1731497167517Initializing all the Stores at 1731497167519 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497167519Cleaning up temporary data from old regions at 1731497167526 (+7 ms)Running coprocessor post-open hooks at 1731497167535 (+9 ms)Region opened successfully at 1731497167536 (+1 ms) 2024-11-13T11:26:07,539 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f., pid=6, masterSystemTime=1731497167509 2024-11-13T11:26:07,545 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bfaa079922789fecd57194b1ee107e5f, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,35781,1731497164277 
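A small check on the "Opened bfaa079922789fecd57194b1ee107e5f" entry above: ConstantSizeRegionSplitPolicy reports desiredMaxFileSize=744316 with jitterRate=-0.05355. That is just the table's MAX_FILESIZE of 786,432 bytes (flagged as too small earlier) with the random jitter applied: 786,432 × (1 − 0.05355…) ≈ 744,316 bytes, give or take integer truncation. The jitter exists so that regions created together do not all reach the split threshold at the same moment.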
2024-11-13T11:26:07,545 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:26:07,545 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:26:07,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bfaa079922789fecd57194b1ee107e5f, server=7bf281cf3991,35781,1731497164277 because future has completed 2024-11-13T11:26:07,558 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T11:26:07,560 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bfaa079922789fecd57194b1ee107e5f, server=7bf281cf3991,35781,1731497164277 in 199 msec 2024-11-13T11:26:07,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T11:26:07,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=bfaa079922789fecd57194b1ee107e5f, ASSIGN in 371 msec 2024-11-13T11:26:07,567 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T11:26:07,567 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497167567"}]},"ts":"1731497167567"} 2024-11-13T11:26:07,571 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-13T11:26:07,573 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T11:26:07,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 924 msec 2024-11-13T11:26:11,768 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T11:26:11,852 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T11:26:11,854 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-13T11:26:13,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:26:13,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T11:26:13,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T11:26:13,967 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T11:26:13,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:26:13,968 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T11:26:13,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T11:26:13,969 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T11:26:16,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:26:16,732 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-13T11:26:16,735 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-13T11:26:16,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-13T11:26:16,743 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 
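For orientation, the pid=4 CreateTableProcedure entries scattered above walk through a fixed sequence of states before the "Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed" confirmation. Collected from this log (a reading aid reconstructed from these lines, not taken from HBase source), the order is:

// Order in which the pid=4 CreateTableProcedure states appear in this log.
enum CreateTableStatesAsLogged {
    CREATE_TABLE_PRE_OPERATION,
    CREATE_TABLE_WRITE_FS_LAYOUT,
    CREATE_TABLE_ADD_TO_META,
    CREATE_TABLE_ASSIGN_REGIONS,
    CREATE_TABLE_UPDATE_DESC_CACHE,
    CREATE_TABLE_POST_OPERATION
}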
2024-11-13T11:26:16,744 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497176744 2024-11-13T11:26:16,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:16,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:16,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:16,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:16,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:16,760 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497165846 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497176744 2024-11-13T11:26:16,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741833_1009 (size=451) 2024-11-13T11:26:16,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741833_1009 (size=451) 2024-11-13T11:26:16,770 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497165846 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497165846 2024-11-13T11:26:16,773 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:26:16,786 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f., hostname=7bf281cf3991,35781,1731497164277, seqNum=2] 2024-11-13T11:26:28,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35781 {}] regionserver.HRegion(8855): Flush requested on bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:28,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfaa079922789fecd57194b1ee107e5f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:26:28,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/0574297e17ea49689f32fcebc380a269 is 1080, key is row0001/info:/1731497176790/Put/seqid=0 2024-11-13T11:26:28,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741838_1014 (size=12509) 2024-11-13T11:26:28,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741838_1014 (size=12509) 2024-11-13T11:26:28,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/0574297e17ea49689f32fcebc380a269 2024-11-13T11:26:28,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/0574297e17ea49689f32fcebc380a269 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269 2024-11-13T11:26:28,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269, entries=7, sequenceid=11, filesize=12.2 K 2024-11-13T11:26:28,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bfaa079922789fecd57194b1ee107e5f in 126ms, sequenceid=11, compaction requested=false 2024-11-13T11:26:28,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfaa079922789fecd57194b1ee107e5f: 2024-11-13T11:26:32,550 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T11:26:36,850 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497196850 2024-11-13T11:26:37,059 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:37,059 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:37,059 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:37,060 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:37,060 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:37,060 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:37,060 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497176744 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497196850 2024-11-13T11:26:37,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741837_1013 (size=12399) 2024-11-13T11:26:37,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741837_1013 (size=12399) 2024-11-13T11:26:37,069 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 
2024-11-13T11:26:37,272 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:39,477 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:41,682 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:43,886 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:43,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35781 {}] regionserver.HRegion(8855): Flush requested on bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:26:43,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfaa079922789fecd57194b1ee107e5f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:26:44,089 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:44,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/f05646008e6642db88916f7b40491356 is 1080, key is row0008/info:/1731497190832/Put/seqid=0 2024-11-13T11:26:44,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741840_1016 (size=12509) 2024-11-13T11:26:44,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741840_1016 (size=12509) 2024-11-13T11:26:44,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/f05646008e6642db88916f7b40491356 2024-11-13T11:26:44,116 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/f05646008e6642db88916f7b40491356 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/f05646008e6642db88916f7b40491356 2024-11-13T11:26:44,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/f05646008e6642db88916f7b40491356, entries=7, sequenceid=21, filesize=12.2 K 2024-11-13T11:26:44,328 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:44,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bfaa079922789fecd57194b1ee107e5f in 442ms, sequenceid=21, compaction requested=false 2024-11-13T11:26:44,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfaa079922789fecd57194b1ee107e5f: 2024-11-13T11:26:44,329 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-13T11:26:44,330 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:26:44,331 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269 because midkey is the same as first or last row 2024-11-13T11:26:46,091 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:46,524 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T11:26:46,525 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-13T11:26:48,295 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:48,298 WARN [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:48,299 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C35781%2C1731497164277:(num 1731497196850) roll requested 2024-11-13T11:26:48,299 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497208299 2024-11-13T11:26:48,509 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:48,509 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:48,509 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:48,510 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:48,510 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:48,510 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:26:48,510 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497196850 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497208299 2024-11-13T11:26:48,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741839_1015 (size=7739) 2024-11-13T11:26:48,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741839_1015 (size=7739) 2024-11-13T11:26:48,514 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:26:48,514 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497196850 is not closed yet, will try archiving it next time 2024-11-13T11:26:48,514 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497176744 to 
hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497176744 2024-11-13T11:26:50,499 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:52,517 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bfaa079922789fecd57194b1ee107e5f, had cached 0 bytes from a total of 25018 2024-11-13T11:26:52,707 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:54,912 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:57,116 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:26:59,118 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T11:26:59,118 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497219118 2024-11-13T11:27:02,550 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T11:27:04,135 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:04,138 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:04,138 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C35781%2C1731497164277:(num 1731497219118) roll requested 2024-11-13T11:27:04,138 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:04,138 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:04,138 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:04,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:04,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:04,139 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497208299 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497219118 2024-11-13T11:27:04,140 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:27:04,140 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497208299 is not closed yet, will try archiving it next time 2024-11-13T11:27:04,140 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497224140 2024-11-13T11:27:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741841_1017 (size=4753) 2024-11-13T11:27:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741841_1017 (size=4753) 2024-11-13T11:27:09,143 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:09,144 WARN [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35781 {}] regionserver.HRegion(8855): Flush requested on bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:27:09,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfaa079922789fecd57194b1ee107e5f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:27:09,149 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:09,150 WARN [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:11,145 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T11:27:14,148 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:14,148 WARN [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK], DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK]] 2024-11-13T11:27:14,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:14,150 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:14,150 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:14,151 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:14,151 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:14,152 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497219118 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497224140 2024-11-13T11:27:14,154 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40867:40867),(127.0.0.1/127.0.0.1:39697:39697)] 2024-11-13T11:27:14,154 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497219118 is not closed yet, will try archiving it next time 2024-11-13T11:27:14,154 
DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C35781%2C1731497164277:(num 1731497224140) roll requested 2024-11-13T11:27:14,155 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497234155 2024-11-13T11:27:14,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741842_1018 (size=1569) 2024-11-13T11:27:14,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741842_1018 (size=1569) 2024-11-13T11:27:14,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/037d48a20c8a4eb092f5c278677ff27f is 1080, key is row0015/info:/1731497205889/Put/seqid=0 2024-11-13T11:27:14,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741844_1020 (size=12509) 2024-11-13T11:27:14,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741844_1020 (size=12509) 2024-11-13T11:27:14,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/037d48a20c8a4eb092f5c278677ff27f 2024-11-13T11:27:14,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/037d48a20c8a4eb092f5c278677ff27f as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/037d48a20c8a4eb092f5c278677ff27f 2024-11-13T11:27:14,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/037d48a20c8a4eb092f5c278677ff27f, entries=7, sequenceid=31, filesize=12.2 K 2024-11-13T11:27:19,164 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK], DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK]] 2024-11-13T11:27:19,164 WARN [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK], DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK]] 2024-11-13T11:27:19,189 INFO [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK], DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK]] 2024-11-13T11:27:19,189 WARN [FSHLog-0-hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e-prefix:7bf281cf3991,35781,1731497164277 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41663,DS-02669440-8167-4429-b516-6b6e405a44a1,DISK], DatanodeInfoWithStorage[127.0.0.1:35943,DS-602ddc0b-8347-4402-b835-e64ddb0125fa,DISK]] 2024-11-13T11:27:19,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bfaa079922789fecd57194b1ee107e5f in 10045ms, sequenceid=31, compaction requested=true 2024-11-13T11:27:19,190 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfaa079922789fecd57194b1ee107e5f: 2024-11-13T11:27:19,190 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,190 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-13T11:27:19,190 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:27:19,190 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,190 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269 because midkey is the same as first or last row 2024-11-13T11:27:19,190 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,191 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,191 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497224140 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497234155 2024-11-13T11:27:19,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741843_1019 (size=438) 2024-11-13T11:27:19,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741843_1019 (size=438) 2024-11-13T11:27:19,196 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497196850 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497196850 2024-11-13T11:27:19,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfaa079922789fecd57194b1ee107e5f:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-13T11:27:19,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:27:19,200 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497208299 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497208299 2024-11-13T11:27:19,201 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:27:19,203 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:27:19,203 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C35781%2C1731497164277:(num 1731497239203) roll requested 2024-11-13T11:27:19,204 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497239203 2024-11-13T11:27:19,206 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497219118 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497219118 2024-11-13T11:27:19,208 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:27:19,209 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497224140 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497224140 2024-11-13T11:27:19,210 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.HStore(1541): bfaa079922789fecd57194b1ee107e5f/info is initiating minor compaction (all files) 2024-11-13T11:27:19,211 INFO [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bfaa079922789fecd57194b1ee107e5f/info in TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 
2024-11-13T11:27:19,211 INFO [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/f05646008e6642db88916f7b40491356, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/037d48a20c8a4eb092f5c278677ff27f] into tmpdir=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp, totalSize=36.6 K 2024-11-13T11:27:19,213 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0574297e17ea49689f32fcebc380a269, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731497176790 2024-11-13T11:27:19,214 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] compactions.Compactor(225): Compacting f05646008e6642db88916f7b40491356, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731497190832 2024-11-13T11:27:19,215 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] compactions.Compactor(225): Compacting 037d48a20c8a4eb092f5c278677ff27f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731497205889 2024-11-13T11:27:19,236 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,237 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,237 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497234155 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497239203 2024-11-13T11:27:19,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741845_1021 (size=93) 2024-11-13T11:27:19,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741845_1021 (size=93) 2024-11-13T11:27:19,241 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497234155 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs/7bf281cf3991%2C35781%2C1731497164277.1731497234155 2024-11-13T11:27:19,256 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39697:39697),(127.0.0.1/127.0.0.1:40867:40867)] 2024-11-13T11:27:19,257 INFO 
[regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35781%2C1731497164277.1731497239256 2024-11-13T11:27:19,292 INFO [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfaa079922789fecd57194b1ee107e5f#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:27:19,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,293 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,293 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:19,293 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497239203 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497239256 2024-11-13T11:27:19,294 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/3dde9128fbb24fc99706ad0392ad35d4 is 1080, key is row0001/info:/1731497176790/Put/seqid=0 2024-11-13T11:27:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741846_1022 (size=1258) 2024-11-13T11:27:19,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741846_1022 (size=1258) 2024-11-13T11:27:19,309 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40867:40867),(127.0.0.1/127.0.0.1:39697:39697)] 2024-11-13T11:27:19,309 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/WALs/7bf281cf3991,35781,1731497164277/7bf281cf3991%2C35781%2C1731497164277.1731497239203 is not closed yet, will try archiving it next time 2024-11-13T11:27:19,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741848_1024 (size=27710) 2024-11-13T11:27:19,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741848_1024 (size=27710) 2024-11-13T11:27:19,338 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/3dde9128fbb24fc99706ad0392ad35d4 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/3dde9128fbb24fc99706ad0392ad35d4 2024-11-13T11:27:19,357 INFO 
[RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bfaa079922789fecd57194b1ee107e5f/info of bfaa079922789fecd57194b1ee107e5f into 3dde9128fbb24fc99706ad0392ad35d4(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T11:27:19,358 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bfaa079922789fecd57194b1ee107e5f: 2024-11-13T11:27:19,361 INFO [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f., storeName=bfaa079922789fecd57194b1ee107e5f/info, priority=13, startTime=1731497239192; duration=0sec 2024-11-13T11:27:19,361 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T11:27:19,361 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:27:19,361 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/3dde9128fbb24fc99706ad0392ad35d4 because midkey is the same as first or last row 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/3dde9128fbb24fc99706ad0392ad35d4 because midkey is the same as first or last row 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/3dde9128fbb24fc99706ad0392ad35d4 because midkey is the same as first or last row 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:27:19,362 DEBUG [RS:0;7bf281cf3991:35781-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfaa079922789fecd57194b1ee107e5f:info 2024-11-13T11:27:31,287 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35781 {}] regionserver.HRegion(8855): Flush requested on bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:27:31,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bfaa079922789fecd57194b1ee107e5f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:27:31,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/6966ea7280d242d5b8f8d78a57f8ad6b is 1080, key is row0022/info:/1731497239258/Put/seqid=0 2024-11-13T11:27:31,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741849_1025 (size=12509) 2024-11-13T11:27:31,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741849_1025 (size=12509) 2024-11-13T11:27:31,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/6966ea7280d242d5b8f8d78a57f8ad6b 2024-11-13T11:27:31,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/6966ea7280d242d5b8f8d78a57f8ad6b as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/6966ea7280d242d5b8f8d78a57f8ad6b 2024-11-13T11:27:31,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/6966ea7280d242d5b8f8d78a57f8ad6b, entries=7, sequenceid=42, filesize=12.2 K 2024-11-13T11:27:31,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bfaa079922789fecd57194b1ee107e5f in 66ms, sequenceid=42, compaction requested=false 2024-11-13T11:27:31,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bfaa079922789fecd57194b1ee107e5f: 2024-11-13T11:27:31,353 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-13T11:27:31,353 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:27:31,354 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/3dde9128fbb24fc99706ad0392ad35d4 because midkey is the same as first or last row 2024-11-13T11:27:32,551 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 
3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-13T11:27:37,517 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bfaa079922789fecd57194b1ee107e5f, had cached 0 bytes from a total of 40219
2024-11-13T11:27:39,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-13T11:27:39,302 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-13T11:27:39,302 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
  at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
  at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-13T11:27:39,307 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:27:39,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:27:39,308 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-13T11:27:39,308 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-13T11:27:39,308 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469019133, stopped=false
2024-11-13T11:27:39,309 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,40685,1731497163541
2024-11-13T11:27:39,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-13T11:27:39,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-13T11:27:39,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T11:27:39,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T11:27:39,310 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-13T11:27:39,311 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-13T11:27:39,311 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
  at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
  at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-13T11:27:39,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:27:39,311 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-13T11:27:39,311 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,35781,1731497164277' *****
2024-11-13T11:27:39,311 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-13T11:27:39,312 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-13T11:27:39,312 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-13T11:27:39,312 INFO [RS:0;7bf281cf3991:35781 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-13T11:27:39,312 INFO [RS:0;7bf281cf3991:35781 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-13T11:27:39,313 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(3091): Received CLOSE for bfaa079922789fecd57194b1ee107e5f
2024-11-13T11:27:39,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-13T11:27:39,333 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,35781,1731497164277
2024-11-13T11:27:39,333 INFO [RS:0;7bf281cf3991:35781 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-13T11:27:39,333 INFO [RS:0;7bf281cf3991:35781 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:35781.
2024-11-13T11:27:39,333 DEBUG [RS:0;7bf281cf3991:35781 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
  at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
  at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-13T11:27:39,333 DEBUG [RS:0;7bf281cf3991:35781 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:27:39,334 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bfaa079922789fecd57194b1ee107e5f, disabling compactions & flushes
2024-11-13T11:27:39,334 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:27:39,334 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:27:39,334 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:27:39,334 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:27:39,334 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T11:27:39,334 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. after waiting 0 ms 2024-11-13T11:27:39,334 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:27:39,334 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:27:39,334 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing bfaa079922789fecd57194b1ee107e5f 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-13T11:27:39,335 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T11:27:39,335 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1325): Online Regions={bfaa079922789fecd57194b1ee107e5f=TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T11:27:39,335 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:27:39,335 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:27:39,335 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:27:39,335 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:27:39,335 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:27:39,335 DEBUG [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bfaa079922789fecd57194b1ee107e5f 2024-11-13T11:27:39,335 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-13T11:27:39,342 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/e012ebac404e4d50bac559aa627cae0e is 1080, key is row0029/info:/1731497253290/Put/seqid=0 2024-11-13T11:27:39,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741850_1026 (size=8193) 2024-11-13T11:27:39,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741850_1026 (size=8193) 2024-11-13T11:27:39,358 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/e012ebac404e4d50bac559aa627cae0e 2024-11-13T11:27:39,368 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/info/ae4a2c8c5fe44cfda6d643b7fad19be8 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f./info:regioninfo/1731497167544/Put/seqid=0 2024-11-13T11:27:39,373 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/.tmp/info/e012ebac404e4d50bac559aa627cae0e as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/e012ebac404e4d50bac559aa627cae0e 2024-11-13T11:27:39,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741851_1027 (size=7016) 2024-11-13T11:27:39,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741851_1027 (size=7016) 2024-11-13T11:27:39,384 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/info/ae4a2c8c5fe44cfda6d643b7fad19be8 2024-11-13T11:27:39,384 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/e012ebac404e4d50bac559aa627cae0e, entries=3, sequenceid=48, filesize=8.0 K 2024-11-13T11:27:39,386 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bfaa079922789fecd57194b1ee107e5f in 52ms, sequenceid=48, compaction requested=true 2024-11-13T11:27:39,387 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/f05646008e6642db88916f7b40491356, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/037d48a20c8a4eb092f5c278677ff27f] to archive 2024-11-13T11:27:39,393 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T11:27:39,408 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/0574297e17ea49689f32fcebc380a269 2024-11-13T11:27:39,412 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/f05646008e6642db88916f7b40491356 to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/f05646008e6642db88916f7b40491356 2024-11-13T11:27:39,416 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/037d48a20c8a4eb092f5c278677ff27f to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/info/037d48a20c8a4eb092f5c278677ff27f 2024-11-13T11:27:39,427 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7bf281cf3991:40685 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T11:27:39,428 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0574297e17ea49689f32fcebc380a269=12509, f05646008e6642db88916f7b40491356=12509, 037d48a20c8a4eb092f5c278677ff27f=12509] 2024-11-13T11:27:39,429 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/ns/15d0c610bd3949009f0de9a56ca11735 is 43, key is default/ns:d/1731497166399/Put/seqid=0 2024-11-13T11:27:39,435 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/default/TestLogRolling-testSlowSyncLogRolling/bfaa079922789fecd57194b1ee107e5f/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-13T11:27:39,437 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 2024-11-13T11:27:39,438 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bfaa079922789fecd57194b1ee107e5f: Waiting for close lock at 1731497259333Running coprocessor pre-close hooks at 1731497259333Disabling compacts and flushes for region at 1731497259334 (+1 ms)Disabling writes for close at 1731497259334Obtaining lock to block concurrent updates at 1731497259334Preparing flush snapshotting stores in bfaa079922789fecd57194b1ee107e5f at 1731497259334Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731497259335 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. at 1731497259336 (+1 ms)Flushing bfaa079922789fecd57194b1ee107e5f/info: creating writer at 1731497259336Flushing bfaa079922789fecd57194b1ee107e5f/info: appending metadata at 1731497259342 (+6 ms)Flushing bfaa079922789fecd57194b1ee107e5f/info: closing flushed file at 1731497259342Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@483dd19f: reopening flushed file at 1731497259372 (+30 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for bfaa079922789fecd57194b1ee107e5f in 52ms, sequenceid=48, compaction requested=true at 1731497259386 (+14 ms)Writing region close event to WAL at 1731497259429 (+43 ms)Running coprocessor post-close hooks at 1731497259436 (+7 ms)Closed at 1731497259437 (+1 ms) 2024-11-13T11:27:39,438 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731497166642.bfaa079922789fecd57194b1ee107e5f. 
2024-11-13T11:27:39,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741852_1028 (size=5153) 2024-11-13T11:27:39,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741852_1028 (size=5153) 2024-11-13T11:27:39,446 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/ns/15d0c610bd3949009f0de9a56ca11735 2024-11-13T11:27:39,474 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/table/14ba9b8ced184a4db82236efdf61822b is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731497167567/Put/seqid=0 2024-11-13T11:27:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741853_1029 (size=5396) 2024-11-13T11:27:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741853_1029 (size=5396) 2024-11-13T11:27:39,483 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/table/14ba9b8ced184a4db82236efdf61822b 2024-11-13T11:27:39,493 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/info/ae4a2c8c5fe44cfda6d643b7fad19be8 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/info/ae4a2c8c5fe44cfda6d643b7fad19be8 2024-11-13T11:27:39,501 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/info/ae4a2c8c5fe44cfda6d643b7fad19be8, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T11:27:39,503 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/ns/15d0c610bd3949009f0de9a56ca11735 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/ns/15d0c610bd3949009f0de9a56ca11735 2024-11-13T11:27:39,511 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/ns/15d0c610bd3949009f0de9a56ca11735, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T11:27:39,512 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/.tmp/table/14ba9b8ced184a4db82236efdf61822b as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/table/14ba9b8ced184a4db82236efdf61822b 2024-11-13T11:27:39,520 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/table/14ba9b8ced184a4db82236efdf61822b, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T11:27:39,522 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 187ms, sequenceid=11, compaction requested=false 2024-11-13T11:27:39,533 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T11:27:39,535 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:27:39,535 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:27:39,535 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497259335Running coprocessor pre-close hooks at 1731497259335Disabling compacts and flushes for region at 1731497259335Disabling writes for close at 1731497259335Obtaining lock to block concurrent updates at 1731497259335Preparing flush snapshotting stores in 1588230740 at 1731497259335Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731497259336 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731497259337 (+1 ms)Flushing 1588230740/info: creating writer at 1731497259337Flushing 1588230740/info: appending metadata at 1731497259367 (+30 ms)Flushing 1588230740/info: closing flushed file at 1731497259368 (+1 ms)Flushing 1588230740/ns: creating writer at 1731497259395 (+27 ms)Flushing 1588230740/ns: appending metadata at 1731497259428 (+33 ms)Flushing 1588230740/ns: closing flushed file at 1731497259428Flushing 1588230740/table: creating writer at 1731497259458 (+30 ms)Flushing 1588230740/table: appending metadata at 1731497259474 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731497259474Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fd7c2df: reopening flushed file at 1731497259492 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ae1adc: reopening flushed file at 1731497259502 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b73e328: reopening flushed file at 1731497259511 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 187ms, sequenceid=11, compaction requested=false at 
1731497259522 (+11 ms)Writing region close event to WAL at 1731497259523 (+1 ms)Running coprocessor post-close hooks at 1731497259534 (+11 ms)Closed at 1731497259535 (+1 ms) 2024-11-13T11:27:39,535 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T11:27:39,535 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,35781,1731497164277; all regions closed. 2024-11-13T11:27:39,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,539 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,539 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,539 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741834_1010 (size=3066) 2024-11-13T11:27:39,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741834_1010 (size=3066) 2024-11-13T11:27:39,553 DEBUG [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs 2024-11-13T11:27:39,553 INFO [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C35781%2C1731497164277.meta:.meta(num 1731497166209) 2024-11-13T11:27:39,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,558 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,558 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,558 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,558 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741847_1023 (size=12695) 2024-11-13T11:27:39,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741847_1023 (size=12695) 2024-11-13T11:27:39,569 DEBUG [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/oldWALs 2024-11-13T11:27:39,570 INFO [RS:0;7bf281cf3991:35781 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C35781%2C1731497164277:(num 1731497239256) 2024-11-13T11:27:39,570 DEBUG [RS:0;7bf281cf3991:35781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:39,570 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:27:39,570 INFO [RS:0;7bf281cf3991:35781 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:27:39,570 INFO [RS:0;7bf281cf3991:35781 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T11:27:39,571 INFO [RS:0;7bf281cf3991:35781 {}] hbase.HBaseServerBase(448): Shutdown executor service 
2024-11-13T11:27:39,571 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:27:39,571 INFO [RS:0;7bf281cf3991:35781 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35781 2024-11-13T11:27:39,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,35781,1731497164277 2024-11-13T11:27:39,577 INFO [RS:0;7bf281cf3991:35781 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:27:39,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:27:39,578 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,35781,1731497164277] 2024-11-13T11:27:39,580 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,35781,1731497164277 already deleted, retry=false 2024-11-13T11:27:39,580 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,35781,1731497164277 expired; onlineServers=0 2024-11-13T11:27:39,581 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,40685,1731497163541' ***** 2024-11-13T11:27:39,581 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T11:27:39,581 INFO [M:0;7bf281cf3991:40685 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:27:39,581 INFO [M:0;7bf281cf3991:40685 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:27:39,581 DEBUG [M:0;7bf281cf3991:40685 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T11:27:39,581 DEBUG [M:0;7bf281cf3991:40685 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:27:39,581 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T11:27:39,582 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497165470 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497165470,5,FailOnTimeoutGroup] 2024-11-13T11:27:39,582 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497165469 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497165469,5,FailOnTimeoutGroup] 2024-11-13T11:27:39,582 INFO [M:0;7bf281cf3991:40685 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:27:39,582 INFO [M:0;7bf281cf3991:40685 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:27:39,582 DEBUG [M:0;7bf281cf3991:40685 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:27:39,582 INFO [M:0;7bf281cf3991:40685 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:27:39,582 INFO [M:0;7bf281cf3991:40685 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:27:39,583 INFO [M:0;7bf281cf3991:40685 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:27:39,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:27:39,583 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T11:27:39,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:39,584 DEBUG [M:0;7bf281cf3991:40685 {}] zookeeper.ZKUtil(347): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T11:27:39,584 WARN [M:0;7bf281cf3991:40685 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T11:27:39,585 INFO [M:0;7bf281cf3991:40685 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/.lastflushedseqids 2024-11-13T11:27:39,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741854_1030 (size=130) 2024-11-13T11:27:39,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741854_1030 (size=130) 2024-11-13T11:27:39,600 INFO [M:0;7bf281cf3991:40685 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:27:39,601 INFO [M:0;7bf281cf3991:40685 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:27:39,601 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:27:39,601 INFO [M:0;7bf281cf3991:40685 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:39,601 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:39,601 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:27:39,601 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:39,601 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-13T11:27:39,628 DEBUG [M:0;7bf281cf3991:40685 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/81228944ea944c8d94ff7a32a6fd5efc is 82, key is hbase:meta,,1/info:regioninfo/1731497166314/Put/seqid=0 2024-11-13T11:27:39,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741855_1031 (size=5672) 2024-11-13T11:27:39,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741855_1031 (size=5672) 2024-11-13T11:27:39,640 INFO [M:0;7bf281cf3991:40685 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/81228944ea944c8d94ff7a32a6fd5efc 2024-11-13T11:27:39,674 DEBUG [M:0;7bf281cf3991:40685 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cb793355261345408c462cc5fb9f28f1 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731497167576/Put/seqid=0 2024-11-13T11:27:39,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:39,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35781-0x10038d553fb0001, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:39,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741856_1032 (size=6247) 2024-11-13T11:27:39,682 INFO [RS:0;7bf281cf3991:35781 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:27:39,682 INFO [RS:0;7bf281cf3991:35781 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,35781,1731497164277; zookeeper connection closed. 
2024-11-13T11:27:39,682 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4e3f10bd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4e3f10bd 2024-11-13T11:27:39,683 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T11:27:39,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741856_1032 (size=6247) 2024-11-13T11:27:39,684 INFO [M:0;7bf281cf3991:40685 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cb793355261345408c462cc5fb9f28f1 2024-11-13T11:27:39,685 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:27:39,693 INFO [M:0;7bf281cf3991:40685 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cb793355261345408c462cc5fb9f28f1 2024-11-13T11:27:39,712 DEBUG [M:0;7bf281cf3991:40685 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37e4be2124b748f7853e2b5ca5712437 is 69, key is 7bf281cf3991,35781,1731497164277/rs:state/1731497165602/Put/seqid=0 2024-11-13T11:27:39,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741857_1033 (size=5156) 2024-11-13T11:27:39,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741857_1033 (size=5156) 2024-11-13T11:27:39,727 INFO [M:0;7bf281cf3991:40685 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37e4be2124b748f7853e2b5ca5712437 2024-11-13T11:27:39,757 DEBUG [M:0;7bf281cf3991:40685 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dcdb4c9c2dfd4a6a890df34bf9822b2b is 52, key is load_balancer_on/state:d/1731497166621/Put/seqid=0 2024-11-13T11:27:39,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741858_1034 (size=5056) 2024-11-13T11:27:39,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741858_1034 (size=5056) 2024-11-13T11:27:39,773 INFO [M:0;7bf281cf3991:40685 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dcdb4c9c2dfd4a6a890df34bf9822b2b 2024-11-13T11:27:39,785 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/81228944ea944c8d94ff7a32a6fd5efc as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/81228944ea944c8d94ff7a32a6fd5efc 2024-11-13T11:27:39,794 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/81228944ea944c8d94ff7a32a6fd5efc, entries=8, sequenceid=59, filesize=5.5 K 2024-11-13T11:27:39,795 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cb793355261345408c462cc5fb9f28f1 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cb793355261345408c462cc5fb9f28f1 2024-11-13T11:27:39,805 INFO [M:0;7bf281cf3991:40685 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cb793355261345408c462cc5fb9f28f1 2024-11-13T11:27:39,806 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cb793355261345408c462cc5fb9f28f1, entries=6, sequenceid=59, filesize=6.1 K 2024-11-13T11:27:39,808 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37e4be2124b748f7853e2b5ca5712437 as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/37e4be2124b748f7853e2b5ca5712437 2024-11-13T11:27:39,818 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/37e4be2124b748f7853e2b5ca5712437, entries=1, sequenceid=59, filesize=5.0 K 2024-11-13T11:27:39,821 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dcdb4c9c2dfd4a6a890df34bf9822b2b as hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dcdb4c9c2dfd4a6a890df34bf9822b2b 2024-11-13T11:27:39,835 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dcdb4c9c2dfd4a6a890df34bf9822b2b, entries=1, sequenceid=59, filesize=4.9 K 2024-11-13T11:27:39,837 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 236ms, 
sequenceid=59, compaction requested=false 2024-11-13T11:27:39,854 INFO [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:39,854 DEBUG [M:0;7bf281cf3991:40685 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497259601Disabling compacts and flushes for region at 1731497259601Disabling writes for close at 1731497259601Obtaining lock to block concurrent updates at 1731497259601Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497259601Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731497259602 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731497259603 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497259603Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497259627 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497259627Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497259650 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497259673 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497259673Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497259693 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497259711 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497259711Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497259735 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497259757 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497259757Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc66c9e: reopening flushed file at 1731497259783 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d7cbcd5: reopening flushed file at 1731497259794 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66258c28: reopening flushed file at 1731497259806 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12f40d00: reopening flushed file at 1731497259818 (+12 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 236ms, sequenceid=59, compaction requested=false at 1731497259837 (+19 ms)Writing region close event to WAL at 1731497259854 (+17 ms)Closed at 1731497259854 2024-11-13T11:27:39,855 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,855 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,855 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,856 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:39,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41663 is added to blk_1073741830_1006 (size=27973) 2024-11-13T11:27:39,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35943 is added to blk_1073741830_1006 (size=27973) 
2024-11-13T11:27:39,861 INFO [M:0;7bf281cf3991:40685 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T11:27:39,861 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:27:39,862 INFO [M:0;7bf281cf3991:40685 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40685 2024-11-13T11:27:39,862 INFO [M:0;7bf281cf3991:40685 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:27:39,966 INFO [M:0;7bf281cf3991:40685 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:27:39,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:39,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40685-0x10038d553fb0000, quorum=127.0.0.1:49981, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:39,971 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@590b36b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:39,974 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@237fc06a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:39,974 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:39,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35f1cf70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:39,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43794ae7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:39,977 WARN [BP-92278369-172.17.0.2-1731497160224 heartbeating to localhost/127.0.0.1:42441 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:39,977 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:27:39,978 WARN [BP-92278369-172.17.0.2-1731497160224 heartbeating to localhost/127.0.0.1:42441 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-92278369-172.17.0.2-1731497160224 (Datanode Uuid 576c2c7c-6912-4166-9b93-f20d35dbf493) service to localhost/127.0.0.1:42441 2024-11-13T11:27:39,978 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:39,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data3/current/BP-92278369-172.17.0.2-1731497160224 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:39,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data4/current/BP-92278369-172.17.0.2-1731497160224 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:39,980 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:39,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11e88411{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:39,983 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75423500{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:39,983 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:39,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@621a7cbc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:39,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c88bf4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:39,985 WARN [BP-92278369-172.17.0.2-1731497160224 heartbeating to localhost/127.0.0.1:42441 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:39,985 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:27:39,985 WARN [BP-92278369-172.17.0.2-1731497160224 heartbeating to localhost/127.0.0.1:42441 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-92278369-172.17.0.2-1731497160224 (Datanode Uuid c419622e-e76f-41e1-a5ac-b914064dd84a) service to localhost/127.0.0.1:42441 2024-11-13T11:27:39,985 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:39,985 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data1/current/BP-92278369-172.17.0.2-1731497160224 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:39,986 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/cluster_2d346bbe-7588-fba0-5ea0-79662cb4f71f/data/data2/current/BP-92278369-172.17.0.2-1731497160224 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:39,986 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:40,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:27:40,003 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:40,003 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:40,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:40,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:40,012 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:27:40,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T11:27:40,066 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42441 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42441 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42441 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42441 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42441 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42441 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@262d32c9 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7bf281cf3991:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7bf281cf3991:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42441 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42441 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7bf281cf3991:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=192 (was 444), ProcessCount=11 (was 11), AvailableMemoryMB=2825 (was 4107) 2024-11-13T11:27:40,073 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=192, ProcessCount=11, AvailableMemoryMB=2826 2024-11-13T11:27:40,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:27:40,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.log.dir so I do NOT create it in target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7aa531c-03d3-bc14-af0d-5f8b5a14665d/hadoop.tmp.dir so I do NOT create it in target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e, deleteOnExit=true 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/test.cache.data in system properties and HBase conf 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:27:40,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:27:40,074 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:27:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:27:40,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:27:40,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:27:40,087 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:27:40,141 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:40,147 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:40,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:40,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:40,148 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:27:40,149 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:40,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a32e967{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:40,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eb509db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:40,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3eba774a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/java.io.tmpdir/jetty-localhost-34521-hadoop-hdfs-3_4_1-tests_jar-_-any-9186769911581090313/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:27:40,250 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c8f0dfe{HTTP/1.1, (http/1.1)}{localhost:34521} 2024-11-13T11:27:40,250 INFO [Time-limited test {}] server.Server(415): Started @102410ms 2024-11-13T11:27:40,262 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:27:40,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:40,321 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:40,322 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:40,322 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:40,322 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:27:40,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5122c1fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:40,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c282bb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:40,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ae5fd79{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/java.io.tmpdir/jetty-localhost-34551-hadoop-hdfs-3_4_1-tests_jar-_-any-14274510001255392392/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:40,446 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a3a779{HTTP/1.1, (http/1.1)}{localhost:34551} 2024-11-13T11:27:40,446 INFO [Time-limited test {}] server.Server(415): Started @102607ms 2024-11-13T11:27:40,448 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:27:40,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:40,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:40,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:40,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:40,501 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:27:40,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48dfdc62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:40,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77d7a6d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:40,523 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data1/current/BP-633221898-172.17.0.2-1731497260098/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:40,523 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data2/current/BP-633221898-172.17.0.2-1731497260098/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:40,544 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:27:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xed0df3c3e86fcaf1 with lease ID 0xfdf7a4d595a01658: Processing first storage report for DS-7de5a5a9-deb3-437d-861b-58590c1c74e2 from datanode DatanodeRegistration(127.0.0.1:38839, datanodeUuid=84ff2239-bd75-4bfb-afbd-1e48c522623c, infoPort=41591, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098) 2024-11-13T11:27:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xed0df3c3e86fcaf1 with lease ID 0xfdf7a4d595a01658: from storage DS-7de5a5a9-deb3-437d-861b-58590c1c74e2 node DatanodeRegistration(127.0.0.1:38839, datanodeUuid=84ff2239-bd75-4bfb-afbd-1e48c522623c, infoPort=41591, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xed0df3c3e86fcaf1 with lease ID 0xfdf7a4d595a01658: Processing first storage report for DS-b10b2492-fa59-4f3f-9435-f64172261945 from datanode DatanodeRegistration(127.0.0.1:38839, datanodeUuid=84ff2239-bd75-4bfb-afbd-1e48c522623c, infoPort=41591, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098) 2024-11-13T11:27:40,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xed0df3c3e86fcaf1 with lease ID 0xfdf7a4d595a01658: from storage DS-b10b2492-fa59-4f3f-9435-f64172261945 node DatanodeRegistration(127.0.0.1:38839, datanodeUuid=84ff2239-bd75-4bfb-afbd-1e48c522623c, infoPort=41591, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:40,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@508b71c4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/java.io.tmpdir/jetty-localhost-46857-hadoop-hdfs-3_4_1-tests_jar-_-any-4119247774981866369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:40,621 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c5ed954{HTTP/1.1, (http/1.1)}{localhost:46857} 2024-11-13T11:27:40,621 INFO [Time-limited test {}] server.Server(415): Started @102782ms 2024-11-13T11:27:40,623 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
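The repeated AuthenticationFilter warning above comes from the unreadable signature secret file, after which each HTTP server falls back to a per-process random secret. A minimal sketch, assuming the stock Hadoop HTTP authentication settings and a hypothetical secret path (not the path from the log), of how a deployment could point the filter at a readable secret instead:

    import org.apache.hadoop.conf.Configuration;

    public class HttpAuthSecretExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical path for illustration only; it just has to be readable
        // by the process that hosts the HTTP server.
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/tmp/http-auth-signature-secret");
        // "simple" is the default pseudo-authentication type.
        conf.set("hadoop.http.authentication.type", "simple");
        System.out.println(
            conf.get("hadoop.http.authentication.signature.secret.file"));
      }
    }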
2024-11-13T11:27:40,871 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data4/current/BP-633221898-172.17.0.2-1731497260098/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:40,871 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data3/current/BP-633221898-172.17.0.2-1731497260098/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:40,887 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:27:40,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a162a6d8c48c2bc with lease ID 0xfdf7a4d595a01659: Processing first storage report for DS-5b801deb-2686-4a38-9dde-7422cc14c206 from datanode DatanodeRegistration(127.0.0.1:35185, datanodeUuid=8e084de9-094e-4a0c-ba9e-6f1f6fe109d0, infoPort=33335, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098) 2024-11-13T11:27:40,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a162a6d8c48c2bc with lease ID 0xfdf7a4d595a01659: from storage DS-5b801deb-2686-4a38-9dde-7422cc14c206 node DatanodeRegistration(127.0.0.1:35185, datanodeUuid=8e084de9-094e-4a0c-ba9e-6f1f6fe109d0, infoPort=33335, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:40,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a162a6d8c48c2bc with lease ID 0xfdf7a4d595a01659: Processing first storage report for DS-b63a8a4b-0382-468b-a67e-95f01a1e99e5 from datanode DatanodeRegistration(127.0.0.1:35185, datanodeUuid=8e084de9-094e-4a0c-ba9e-6f1f6fe109d0, infoPort=33335, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098) 2024-11-13T11:27:40,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a162a6d8c48c2bc with lease ID 0xfdf7a4d595a01659: from storage DS-b63a8a4b-0382-468b-a67e-95f01a1e99e5 node DatanodeRegistration(127.0.0.1:35185, datanodeUuid=8e084de9-094e-4a0c-ba9e-6f1f6fe109d0, infoPort=33335, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1036064892;c=1731497260098), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:40,950 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a 2024-11-13T11:27:40,952 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/zookeeper_0, clientPort=62703, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:27:40,953 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62703 2024-11-13T11:27:40,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:40,955 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:40,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:27:40,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:27:40,969 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448 with version=8 2024-11-13T11:27:40,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:27:40,972 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:27:40,972 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:27:40,973 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46341 2024-11-13T11:27:40,975 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46341 connecting to ZooKeeper ensemble=127.0.0.1:62703 2024-11-13T11:27:40,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:463410x0, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:27:40,980 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46341-0x10038d6d4060000 connected 2024-11-13T11:27:40,999 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:41,001 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:41,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:27:41,005 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448, hbase.cluster.distributed=false 2024-11-13T11:27:41,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:27:41,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46341 2024-11-13T11:27:41,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46341 2024-11-13T11:27:41,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46341 2024-11-13T11:27:41,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46341 2024-11-13T11:27:41,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46341 2024-11-13T11:27:41,030 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:27:41,031 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:27:41,032 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41985 2024-11-13T11:27:41,035 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41985 connecting to ZooKeeper ensemble=127.0.0.1:62703 2024-11-13T11:27:41,036 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:41,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:41,046 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419850x0, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:27:41,047 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419850x0, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:27:41,047 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:27:41,049 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41985-0x10038d6d4060001 connected 2024-11-13T11:27:41,057 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:27:41,058 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:27:41,060 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:27:41,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41985 2024-11-13T11:27:41,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41985 2024-11-13T11:27:41,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41985 2024-11-13T11:27:41,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41985 2024-11-13T11:27:41,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41985 2024-11-13T11:27:41,081 
DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:46341 2024-11-13T11:27:41,081 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,083 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:41,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:41,084 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,085 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:27:41,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,085 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,086 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:27:41,087 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,46341,1731497260971 from backup master directory 2024-11-13T11:27:41,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,088 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
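The startup sequence logged above (mini ZooKeeper cluster, mini DFS datanodes, then a master and a region server registering their znodes) is driven by HBaseTestingUtil. A minimal sketch of how a test typically brings such a cluster up and down, assuming the HBaseTestingUtil method names mirror the familiar HBaseTestingUtility API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // hbase.regionserver.handler.count is the standard knob behind the
        // handlerCount values printed by the RpcExecutor lines above.
        conf.setInt("hbase.regionserver.handler.count", 3);
        util.startMiniCluster();   // starts ZK, HDFS and HBase, as in the log
        try {
          System.out.println("rootdir=" + conf.get("hbase.rootdir"));
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }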
2024-11-13T11:27:41,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:41,088 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,088 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:41,093 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/hbase.id] with ID: 7bf3cc09-716f-46f6-bf93-dbbc6ce4d9a8 2024-11-13T11:27:41,093 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/.tmp/hbase.id 2024-11-13T11:27:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:27:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:27:41,104 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/.tmp/hbase.id]:[hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/hbase.id] 2024-11-13T11:27:41,120 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:41,120 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T11:27:41,122 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
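The ZKWatcher/ZKUtil DEBUG lines throughout this section show the usual ZooKeeper pattern of registering a watch on a znode that may not exist yet. A minimal sketch with the plain ZooKeeper client, reusing the quorum address and znode path from the log; everything else is illustrative:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62703", 30000,
            event -> System.out.println("session event: " + event));
        // exists() returns null for a missing znode but still registers the
        // watch -- the "Set watcher on znode that does not yet exist" lines.
        Watcher onRunning =
            event -> System.out.println("/hbase/running: " + event.getType());
        if (zk.exists("/hbase/running", onRunning) == null) {
          System.out.println("/hbase/running not created yet; watch registered");
        }
        zk.close();
      }
    }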
2024-11-13T11:27:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,123 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:27:41,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:27:41,132 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:27:41,132 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:27:41,133 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:41,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:27:41,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:27:41,142 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store 2024-11-13T11:27:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:27:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:27:41,151 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:41,151 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:27:41,151 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:41,151 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:41,151 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:27:41,151 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:41,151 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
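The long 'master:store' descriptor dumped above is built internally by HBase; a minimal sketch of how a comparable descriptor can be assembled with the public builder API, using only the family names and block sizes visible in the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // "info" mirrors the family printed above: 3 versions, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            // "proc", "rs" and "state" keep the defaults (1 version, 64 KB blocks).
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(store);
      }
    }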
2024-11-13T11:27:41,151 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497261151Disabling compacts and flushes for region at 1731497261151Disabling writes for close at 1731497261151Writing region close event to WAL at 1731497261151Closed at 1731497261151 2024-11-13T11:27:41,153 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/.initializing 2024-11-13T11:27:41,153 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/WALs/7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,156 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C46341%2C1731497260971, suffix=, logDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/WALs/7bf281cf3991,46341,1731497260971, archiveDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/oldWALs, maxLogs=10 2024-11-13T11:27:41,157 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C46341%2C1731497260971.1731497261157 2024-11-13T11:27:41,163 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/WALs/7bf281cf3991,46341,1731497260971/7bf281cf3991%2C46341%2C1731497260971.1731497261157 2024-11-13T11:27:41,164 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41591:41591),(127.0.0.1/127.0.0.1:33335:33335)] 2024-11-13T11:27:41,164 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:27:41,165 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:41,165 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,165 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:27:41,169 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:27:41,171 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:41,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:27:41,175 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:41,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:27:41,178 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:41,179 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,180 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,180 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,182 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,182 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,183 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:27:41,185 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:41,190 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:27:41,191 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773493, jitterRate=-0.016453325748443604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:27:41,192 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497261165Initializing all the Stores at 1731497261166 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261166Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497261167 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497261167Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497261167Cleaning up temporary data from old regions at 1731497261182 (+15 ms)Region opened successfully at 1731497261192 (+10 ms) 2024-11-13T11:27:41,193 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:27:41,197 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@588c7c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:27:41,198 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:27:41,199 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:27:41,199 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:27:41,199 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:27:41,199 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T11:27:41,200 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T11:27:41,200 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:27:41,204 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:27:41,205 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:27:41,206 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:27:41,206 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:27:41,207 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:27:41,208 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:27:41,208 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:27:41,210 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:27:41,211 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:27:41,212 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:27:41,213 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:27:41,215 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:27:41,217 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:27:41,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:27:41,219 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:27:41,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,219 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,220 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,46341,1731497260971, sessionid=0x10038d6d4060000, setting cluster-up flag (Was=false) 2024-11-13T11:27:41,223 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,227 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:27:41,228 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,231 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,235 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:27:41,236 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,46341,1731497260971 2024-11-13T11:27:41,238 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:27:41,243 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:41,243 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:27:41,243 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:27:41,244 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,46341,1731497260971 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:27:41,245 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:41,245 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:41,245 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:41,245 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:41,245 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:27:41,246 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,246 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:27:41,246 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497291249 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:27:41,249 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:27:41,250 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:41,250 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:27:41,251 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
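The "Chore ScheduledChore name=... is enabled" lines above refer to HBase's chore framework. A minimal sketch of scheduling a periodic chore, assuming the three-argument ScheduledChore constructor and ChoreService methods shown here (the real chores, LogsCleaner and friends, scan and delete files as the INFO lines describe):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // 1000 ms period; the log above reports chore periods in MILLISECONDS.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);
        Thread.sleep(3_000);
        stopper.stop("done");
        service.shutdown();
      }
    }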
2024-11-13T11:27:41,252 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,252 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:27:41,253 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:27:41,253 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:27:41,253 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:27:41,257 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:27:41,257 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:27:41,260 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497261257,5,FailOnTimeoutGroup] 2024-11-13T11:27:41,260 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497261260,5,FailOnTimeoutGroup] 2024-11-13T11:27:41,260 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,260 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:27:41,260 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,260 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:27:41,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:27:41,271 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:27:41,271 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448 2024-11-13T11:27:41,274 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(746): ClusterId : 7bf3cc09-716f-46f6-bf93-dbbc6ce4d9a8 2024-11-13T11:27:41,274 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:27:41,276 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:27:41,276 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:27:41,278 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure online-snapshot initialized 2024-11-13T11:27:41,278 DEBUG [RS:0;7bf281cf3991:41985 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d05b66e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:27:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:27:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:27:41,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:41,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:27:41,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:27:41,295 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:27:41,296 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:41985 2024-11-13T11:27:41,296 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:27:41,296 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:27:41,296 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T11:27:41,297 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,46341,1731497260971 with port=41985, startcode=1731497261030 2024-11-13T11:27:41,298 DEBUG [RS:0;7bf281cf3991:41985 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:27:41,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:27:41,298 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:27:41,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:27:41,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:27:41,302 INFO [HMaster-EventLoopGroup-5-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41301, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:27:41,303 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46341 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,303 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46341 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:27:41,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:27:41,305 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448 2024-11-13T11:27:41,305 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44029 2024-11-13T11:27:41,305 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:27:41,306 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740 2024-11-13T11:27:41,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:27:41,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740 2024-11-13T11:27:41,307 DEBUG [RS:0;7bf281cf3991:41985 {}] zookeeper.ZKUtil(111): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,307 WARN [RS:0;7bf281cf3991:41985 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T11:27:41,307 INFO [RS:0;7bf281cf3991:41985 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:41,308 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/WALs/7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,308 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,41985,1731497261030] 2024-11-13T11:27:41,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:27:41,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:27:41,310 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:27:41,311 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:27:41,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:27:41,317 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:27:41,318 INFO [RS:0;7bf281cf3991:41985 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:27:41,318 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:27:41,318 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:41,318 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852028, jitterRate=0.08340980112552643}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:27:41,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497261289Initializing all the Stores at 1731497261291 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261291Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261293 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497261293Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261293Cleaning up temporary data from old regions at 1731497261309 (+16 ms)Region opened successfully at 1731497261319 (+10 ms) 2024-11-13T11:27:41,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:27:41,320 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:27:41,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:27:41,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:27:41,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:27:41,321 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:27:41,321 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:27:41,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497261319Disabling compacts and flushes for region at 1731497261319Disabling writes for close at 1731497261320 (+1 ms)Writing region close event to WAL at 1731497261321 (+1 ms)Closed at 1731497261321 2024-11-13T11:27:41,322 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 
1mins, 0sec 2024-11-13T11:27:41,322 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:41,323 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:27:41,324 DEBUG [RS:0;7bf281cf3991:41985 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 
2024-11-13T11:27:41,326 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:27:41,327 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:27:41,331 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,331 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,331 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,332 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,332 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,332 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,41985,1731497261030-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:27:41,348 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:27:41,348 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,41985,1731497261030-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,348 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,349 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.Replication(171): 7bf281cf3991,41985,1731497261030 started 2024-11-13T11:27:41,364 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:41,364 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,41985,1731497261030, RpcServer on 7bf281cf3991/172.17.0.2:41985, sessionid=0x10038d6d4060001 2024-11-13T11:27:41,364 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:27:41,364 DEBUG [RS:0;7bf281cf3991:41985 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,364 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,41985,1731497261030' 2024-11-13T11:27:41,364 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:27:41,365 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:27:41,366 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:27:41,366 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:27:41,366 DEBUG [RS:0;7bf281cf3991:41985 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,366 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,41985,1731497261030' 2024-11-13T11:27:41,366 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:27:41,366 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:27:41,367 DEBUG [RS:0;7bf281cf3991:41985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:27:41,367 INFO [RS:0;7bf281cf3991:41985 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:27:41,367 INFO [RS:0;7bf281cf3991:41985 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T11:27:41,469 INFO [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C41985%2C1731497261030, suffix=, logDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/WALs/7bf281cf3991,41985,1731497261030, archiveDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/oldWALs, maxLogs=32 2024-11-13T11:27:41,471 INFO [RS:0;7bf281cf3991:41985 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C41985%2C1731497261030.1731497261471 2024-11-13T11:27:41,477 WARN [7bf281cf3991:46341 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-13T11:27:41,479 INFO [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/WALs/7bf281cf3991,41985,1731497261030/7bf281cf3991%2C41985%2C1731497261030.1731497261471 2024-11-13T11:27:41,481 DEBUG [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33335:33335),(127.0.0.1/127.0.0.1:41591:41591)] 2024-11-13T11:27:41,728 DEBUG [7bf281cf3991:46341 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:27:41,729 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,731 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,41985,1731497261030, state=OPENING 2024-11-13T11:27:41,732 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:27:41,734 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:41,734 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:41,735 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:41,735 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:27:41,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,41985,1731497261030}] 2024-11-13T11:27:41,889 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:27:41,892 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57193, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:27:41,898 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:27:41,898 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:41,903 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C41985%2C1731497261030.meta, suffix=.meta, 
logDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/WALs/7bf281cf3991,41985,1731497261030, archiveDir=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/oldWALs, maxLogs=32 2024-11-13T11:27:41,907 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C41985%2C1731497261030.meta.1731497261906.meta 2024-11-13T11:27:41,915 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/WALs/7bf281cf3991,41985,1731497261030/7bf281cf3991%2C41985%2C1731497261030.meta.1731497261906.meta 2024-11-13T11:27:41,917 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33335:33335),(127.0.0.1/127.0.0.1:41591:41591)] 2024-11-13T11:27:41,918 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:27:41,919 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:27:41,919 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:27:41,919 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T11:27:41,919 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:27:41,919 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:41,920 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:27:41,920 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:27:41,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:27:41,924 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:27:41,924 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:27:41,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:27:41,929 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:27:41,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:27:41,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:41,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:27:41,936 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:27:41,936 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:41,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T11:27:41,938 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:27:41,939 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740 2024-11-13T11:27:41,941 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740 2024-11-13T11:27:41,943 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:27:41,943 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:27:41,945 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:27:41,948 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:27:41,949 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691485, jitterRate=-0.12073244154453278}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:27:41,950 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:27:41,951 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497261920Writing region info on filesystem at 1731497261920Initializing all the Stores at 1731497261922 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261922Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261922Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497261922Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497261922Cleaning up temporary data from old regions at 1731497261943 (+21 ms)Running coprocessor post-open hooks at 1731497261950 (+7 ms)Region opened successfully at 1731497261951 (+1 ms) 2024-11-13T11:27:41,953 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497261888 2024-11-13T11:27:41,957 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:27:41,958 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:27:41,959 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,962 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,41985,1731497261030, state=OPEN 2024-11-13T11:27:41,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:27:41,965 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:27:41,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:41,965 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,41985,1731497261030 2024-11-13T11:27:41,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:41,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:27:41,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,41985,1731497261030 in 230 msec 2024-11-13T11:27:41,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:27:41,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 647 msec 2024-11-13T11:27:41,978 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:41,978 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:27:41,981 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:27:41,982 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,41985,1731497261030, seqNum=-1] 2024-11-13T11:27:41,982 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:27:41,984 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55737, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:27:41,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 749 msec 2024-11-13T11:27:41,994 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497261994, completionTime=-1 2024-11-13T11:27:41,994 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:27:41,994 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:27:41,998 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:27:41,998 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497321998 2024-11-13T11:27:41,998 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497381998 2024-11-13T11:27:41,998 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-13T11:27:41,999 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,46341,1731497260971-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,999 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,46341,1731497260971-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,999 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,46341,1731497260971-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:41,999 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:46341, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:41,999 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:42,000 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:42,002 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:27:42,006 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.918sec 2024-11-13T11:27:42,006 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:27:42,006 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:27:42,006 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T11:27:42,006 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:27:42,007 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:27:42,007 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,46341,1731497260971-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:27:42,007 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,46341,1731497260971-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:27:42,011 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:27:42,011 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:27:42,011 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,46341,1731497260971-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:42,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2af5e7db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:27:42,075 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,46341,-1 for getting cluster id 2024-11-13T11:27:42,075 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:27:42,078 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7bf3cc09-716f-46f6-bf93-dbbc6ce4d9a8' 2024-11-13T11:27:42,079 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:27:42,079 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7bf3cc09-716f-46f6-bf93-dbbc6ce4d9a8" 2024-11-13T11:27:42,080 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d86c70f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:27:42,080 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,46341,-1] 2024-11-13T11:27:42,080 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:27:42,081 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:42,083 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:27:42,085 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36b1eaa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:27:42,085 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:27:42,087 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,41985,1731497261030, seqNum=-1] 2024-11-13T11:27:42,088 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:27:42,092 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54154, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:27:42,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7bf281cf3991,46341,1731497260971 2024-11-13T11:27:42,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:42,101 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:27:42,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T11:27:42,101 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T11:27:42,102 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:27:42,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:42,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:42,102 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T11:27:42,102 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T11:27:42,102 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=355378974, stopped=false 2024-11-13T11:27:42,103 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,46341,1731497260971 2024-11-13T11:27:42,104 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:27:42,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:27:42,104 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:42,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:42,104 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:27:42,105 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:27:42,105 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:27:42,106 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T11:27:42,107 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:27:42,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:42,107 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,41985,1731497261030' ***** 2024-11-13T11:27:42,107 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T11:27:42,107 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T11:27:42,107 INFO [RS:0;7bf281cf3991:41985 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T11:27:42,107 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T11:27:42,107 INFO [RS:0;7bf281cf3991:41985 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T11:27:42,107 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,41985,1731497261030 2024-11-13T11:27:42,107 INFO [RS:0;7bf281cf3991:41985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:27:42,108 INFO [RS:0;7bf281cf3991:41985 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:41985. 2024-11-13T11:27:42,108 DEBUG [RS:0;7bf281cf3991:41985 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:27:42,108 DEBUG [RS:0;7bf281cf3991:41985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:42,108 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:27:42,108 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:27:42,108 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T11:27:42,108 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:27:42,108 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T11:27:42,109 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T11:27:42,109 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:27:42,109 DEBUG [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T11:27:42,109 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:27:42,109 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:27:42,109 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:27:42,109 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:27:42,109 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T11:27:42,133 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/.tmp/ns/57822b22845044ce8f0f43f9d7e420b1 is 43, key is default/ns:d/1731497261985/Put/seqid=0 2024-11-13T11:27:42,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741835_1011 (size=5153) 2024-11-13T11:27:42,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741835_1011 (size=5153) 2024-11-13T11:27:42,147 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/.tmp/ns/57822b22845044ce8f0f43f9d7e420b1 2024-11-13T11:27:42,158 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/.tmp/ns/57822b22845044ce8f0f43f9d7e420b1 as hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/ns/57822b22845044ce8f0f43f9d7e420b1 2024-11-13T11:27:42,168 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/ns/57822b22845044ce8f0f43f9d7e420b1, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T11:27:42,170 INFO 
[RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 61ms, sequenceid=6, compaction requested=false 2024-11-13T11:27:42,171 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T11:27:42,178 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T11:27:42,182 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:27:42,182 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:27:42,182 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497262109Running coprocessor pre-close hooks at 1731497262109Disabling compacts and flushes for region at 1731497262109Disabling writes for close at 1731497262109Obtaining lock to block concurrent updates at 1731497262109Preparing flush snapshotting stores in 1588230740 at 1731497262109Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731497262110 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731497262111 (+1 ms)Flushing 1588230740/ns: creating writer at 1731497262111Flushing 1588230740/ns: appending metadata at 1731497262132 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731497262132Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e4d44c9: reopening flushed file at 1731497262157 (+25 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 61ms, sequenceid=6, compaction requested=false at 1731497262171 (+14 ms)Writing region close event to WAL at 1731497262174 (+3 ms)Running coprocessor post-close hooks at 1731497262182 (+8 ms)Closed at 1731497262182 2024-11-13T11:27:42,182 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T11:27:42,309 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,41985,1731497261030; all regions closed. 
2024-11-13T11:27:42,310 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,310 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,310 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,310 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741834_1010 (size=1152) 2024-11-13T11:27:42,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741834_1010 (size=1152) 2024-11-13T11:27:42,317 DEBUG [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/oldWALs 2024-11-13T11:27:42,317 INFO [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C41985%2C1731497261030.meta:.meta(num 1731497261906) 2024-11-13T11:27:42,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741833_1009 (size=93) 2024-11-13T11:27:42,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741833_1009 (size=93) 2024-11-13T11:27:42,326 DEBUG [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/oldWALs 2024-11-13T11:27:42,326 INFO [RS:0;7bf281cf3991:41985 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C41985%2C1731497261030:(num 1731497261471) 2024-11-13T11:27:42,326 DEBUG [RS:0;7bf281cf3991:41985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:42,326 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:27:42,326 INFO [RS:0;7bf281cf3991:41985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:27:42,326 INFO [RS:0;7bf281cf3991:41985 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T11:27:42,326 INFO [RS:0;7bf281cf3991:41985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:27:42,327 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T11:27:42,327 INFO [RS:0;7bf281cf3991:41985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41985 2024-11-13T11:27:42,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:27:42,329 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,41985,1731497261030 2024-11-13T11:27:42,329 INFO [RS:0;7bf281cf3991:41985 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:27:42,329 ERROR [pool-180-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f6c20900aa0@88f4818 rejected from java.util.concurrent.ThreadPoolExecutor@4d8dc76d[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-13T11:27:42,331 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,41985,1731497261030] 2024-11-13T11:27:42,332 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,41985,1731497261030 already deleted, retry=false 2024-11-13T11:27:42,332 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,41985,1731497261030 expired; onlineServers=0 2024-11-13T11:27:42,332 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,46341,1731497260971' ***** 2024-11-13T11:27:42,332 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T11:27:42,332 INFO [M:0;7bf281cf3991:46341 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:27:42,332 INFO [M:0;7bf281cf3991:46341 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:27:42,332 DEBUG [M:0;7bf281cf3991:46341 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T11:27:42,332 DEBUG [M:0;7bf281cf3991:46341 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:27:42,332 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T11:27:42,332 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497261257 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497261257,5,FailOnTimeoutGroup] 2024-11-13T11:27:42,332 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497261260 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497261260,5,FailOnTimeoutGroup] 2024-11-13T11:27:42,332 INFO [M:0;7bf281cf3991:46341 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:27:42,332 INFO [M:0;7bf281cf3991:46341 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:27:42,333 DEBUG [M:0;7bf281cf3991:46341 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:27:42,333 INFO [M:0;7bf281cf3991:46341 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:27:42,333 INFO [M:0;7bf281cf3991:46341 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:27:42,333 INFO [M:0;7bf281cf3991:46341 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:27:42,333 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T11:27:42,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:27:42,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:42,334 DEBUG [M:0;7bf281cf3991:46341 {}] zookeeper.ZKUtil(347): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T11:27:42,334 WARN [M:0;7bf281cf3991:46341 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T11:27:42,334 INFO [M:0;7bf281cf3991:46341 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/.lastflushedseqids 2024-11-13T11:27:42,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741836_1012 (size=99) 2024-11-13T11:27:42,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741836_1012 (size=99) 2024-11-13T11:27:42,431 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:42,431 INFO [RS:0;7bf281cf3991:41985 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:27:42,431 INFO [RS:0;7bf281cf3991:41985 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,41985,1731497261030; zookeeper connection closed. 
2024-11-13T11:27:42,431 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41985-0x10038d6d4060001, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:42,431 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6aba2761 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6aba2761 2024-11-13T11:27:42,431 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T11:27:42,744 INFO [M:0;7bf281cf3991:46341 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:27:42,745 INFO [M:0;7bf281cf3991:46341 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:27:42,745 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:27:42,745 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:42,746 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:42,746 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:27:42,746 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T11:27:42,746 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T11:27:42,771 DEBUG [M:0;7bf281cf3991:46341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2508c234463d49bf8ea8103de982ac65 is 82, key is hbase:meta,,1/info:regioninfo/1731497261959/Put/seqid=0 2024-11-13T11:27:42,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741837_1013 (size=5672) 2024-11-13T11:27:42,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741837_1013 (size=5672) 2024-11-13T11:27:42,777 INFO [M:0;7bf281cf3991:46341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2508c234463d49bf8ea8103de982ac65 2024-11-13T11:27:42,801 DEBUG [M:0;7bf281cf3991:46341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e559f5eb47ce4804a5da2d4d88030120 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731497261992/Put/seqid=0 2024-11-13T11:27:42,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741838_1014 (size=5275) 2024-11-13T11:27:42,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741838_1014 (size=5275) 2024-11-13T11:27:42,807 INFO [M:0;7bf281cf3991:46341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e559f5eb47ce4804a5da2d4d88030120 2024-11-13T11:27:42,831 DEBUG [M:0;7bf281cf3991:46341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb28e7712ada4cbabfba3b9b7280a30b is 69, key is 7bf281cf3991,41985,1731497261030/rs:state/1731497261303/Put/seqid=0 2024-11-13T11:27:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741839_1015 (size=5156) 2024-11-13T11:27:42,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741839_1015 (size=5156) 2024-11-13T11:27:42,837 INFO [M:0;7bf281cf3991:46341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb28e7712ada4cbabfba3b9b7280a30b 2024-11-13T11:27:42,859 DEBUG [M:0;7bf281cf3991:46341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f5fa12c4146a47d9be33ada129e9d8ee is 52, key is load_balancer_on/state:d/1731497262099/Put/seqid=0 2024-11-13T11:27:42,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741840_1016 (size=5056) 2024-11-13T11:27:42,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741840_1016 (size=5056) 2024-11-13T11:27:42,865 INFO [M:0;7bf281cf3991:46341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f5fa12c4146a47d9be33ada129e9d8ee 2024-11-13T11:27:42,872 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2508c234463d49bf8ea8103de982ac65 as hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2508c234463d49bf8ea8103de982ac65 2024-11-13T11:27:42,880 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2508c234463d49bf8ea8103de982ac65, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T11:27:42,882 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e559f5eb47ce4804a5da2d4d88030120 as hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e559f5eb47ce4804a5da2d4d88030120 2024-11-13T11:27:42,888 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e559f5eb47ce4804a5da2d4d88030120, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T11:27:42,889 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb28e7712ada4cbabfba3b9b7280a30b as hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bb28e7712ada4cbabfba3b9b7280a30b 2024-11-13T11:27:42,895 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bb28e7712ada4cbabfba3b9b7280a30b, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T11:27:42,896 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f5fa12c4146a47d9be33ada129e9d8ee as hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f5fa12c4146a47d9be33ada129e9d8ee 2024-11-13T11:27:42,902 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44029/user/jenkins/test-data/e1a44aa1-1ee1-a574-7f0c-5704988ab448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f5fa12c4146a47d9be33ada129e9d8ee, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T11:27:42,904 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=29, compaction requested=false 2024-11-13T11:27:42,905 INFO [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:42,905 DEBUG [M:0;7bf281cf3991:46341 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497262745Disabling compacts and flushes for region at 1731497262745Disabling writes for close at 1731497262746 (+1 ms)Obtaining lock to block concurrent updates at 1731497262747 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497262747Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731497262748 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731497262750 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497262751 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497262770 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497262771 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497262783 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497262800 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497262800Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497262813 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497262830 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497262830Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497262844 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497262859 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497262859Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cc397ec: reopening flushed file at 1731497262871 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fad4366: reopening flushed file at 1731497262880 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@497aea42: reopening flushed file at 1731497262888 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5272717d: reopening flushed file at 1731497262895 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=29, compaction requested=false at 1731497262904 (+9 ms)Writing region close event to WAL at 1731497262905 (+1 ms)Closed at 1731497262905 2024-11-13T11:27:42,906 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,906 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:42,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741830_1006 (size=10311) 2024-11-13T11:27:42,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741830_1006 (size=10311) 2024-11-13T11:27:42,909 INFO [M:0;7bf281cf3991:46341 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T11:27:42,909 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T11:27:42,909 INFO [M:0;7bf281cf3991:46341 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46341 2024-11-13T11:27:42,909 INFO [M:0;7bf281cf3991:46341 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:27:43,011 INFO [M:0;7bf281cf3991:46341 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:27:43,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:43,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46341-0x10038d6d4060000, quorum=127.0.0.1:62703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:27:43,016 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@508b71c4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:43,017 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c5ed954{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:43,017 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:43,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77d7a6d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:43,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48dfdc62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:43,020 WARN [BP-633221898-172.17.0.2-1731497260098 heartbeating to localhost/127.0.0.1:44029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:43,020 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:27:43,020 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:43,020 WARN [BP-633221898-172.17.0.2-1731497260098 heartbeating to localhost/127.0.0.1:44029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-633221898-172.17.0.2-1731497260098 (Datanode Uuid 8e084de9-094e-4a0c-ba9e-6f1f6fe109d0) service to localhost/127.0.0.1:44029 2024-11-13T11:27:43,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data3/current/BP-633221898-172.17.0.2-1731497260098 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:43,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data4/current/BP-633221898-172.17.0.2-1731497260098 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:43,021 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:43,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ae5fd79{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:43,023 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a3a779{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:43,023 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:43,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c282bb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:43,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5122c1fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:43,026 WARN [BP-633221898-172.17.0.2-1731497260098 heartbeating to localhost/127.0.0.1:44029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:43,026 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:27:43,026 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:43,026 WARN [BP-633221898-172.17.0.2-1731497260098 heartbeating to localhost/127.0.0.1:44029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-633221898-172.17.0.2-1731497260098 (Datanode Uuid 84ff2239-bd75-4bfb-afbd-1e48c522623c) service to localhost/127.0.0.1:44029 2024-11-13T11:27:43,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data1/current/BP-633221898-172.17.0.2-1731497260098 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:43,027 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/cluster_d9d35baf-5701-c8ba-7691-7481a807fe7e/data/data2/current/BP-633221898-172.17.0.2-1731497260098 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:43,027 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:43,032 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3eba774a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:27:43,033 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c8f0dfe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:43,033 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:43,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eb509db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:43,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a32e967{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:43,039 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:27:43,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T11:27:43,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:27:43,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.log.dir so I do NOT create it in target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0 2024-11-13T11:27:43,056 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b8db90bd-be5e-4378-f43d-0600da24738a/hadoop.tmp.dir so I do NOT create it in target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf, deleteOnExit=true 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/test.cache.data in system properties and HBase conf 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:27:43,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:27:43,057 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T11:27:43,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:27:43,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:27:43,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:27:43,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:27:43,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:27:43,070 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:27:43,120 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:43,127 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:43,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:43,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:43,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:27:43,129 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:43,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2893dce7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:43,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45fb0d0b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:43,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46f5ce7a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-42639-hadoop-hdfs-3_4_1-tests_jar-_-any-1346378595269738005/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:27:43,226 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@cb6fa13{HTTP/1.1, (http/1.1)}{localhost:42639} 2024-11-13T11:27:43,226 INFO [Time-limited test {}] server.Server(415): Started @105387ms 2024-11-13T11:27:43,239 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:27:43,290 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:43,294 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:43,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:43,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:43,295 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:27:43,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@808171a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:43,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e8610c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:43,332 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:27:43,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@742046cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-35677-hadoop-hdfs-3_4_1-tests_jar-_-any-5147510030254169703/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:43,391 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@77298c30{HTTP/1.1, (http/1.1)}{localhost:35677} 2024-11-13T11:27:43,391 INFO [Time-limited test {}] server.Server(415): Started @105551ms 2024-11-13T11:27:43,392 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:27:43,420 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:43,426 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:43,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:43,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:43,427 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:27:43,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18c619fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:43,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40fc6fac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:43,469 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data2/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:43,469 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data1/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:43,499 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:27:43,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b8316812e1cbaeb with lease ID 0xefc7ddcf82d4f9e6: Processing first storage report for DS-2138b76d-f810-4363-a506-30a8cc6ba641 from datanode DatanodeRegistration(127.0.0.1:45561, datanodeUuid=1bc3f713-bceb-447e-ac77-c7bcae2658f8, infoPort=35179, infoSecurePort=0, ipcPort=37897, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:43,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b8316812e1cbaeb with lease ID 0xefc7ddcf82d4f9e6: from storage DS-2138b76d-f810-4363-a506-30a8cc6ba641 node DatanodeRegistration(127.0.0.1:45561, datanodeUuid=1bc3f713-bceb-447e-ac77-c7bcae2658f8, infoPort=35179, infoSecurePort=0, ipcPort=37897, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:43,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b8316812e1cbaeb with lease ID 0xefc7ddcf82d4f9e6: Processing first storage report for DS-b5bcb96a-fa61-459a-bd5c-06577d8a2d60 from datanode DatanodeRegistration(127.0.0.1:45561, datanodeUuid=1bc3f713-bceb-447e-ac77-c7bcae2658f8, infoPort=35179, infoSecurePort=0, ipcPort=37897, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:43,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b8316812e1cbaeb with lease ID 0xefc7ddcf82d4f9e6: from storage DS-b5bcb96a-fa61-459a-bd5c-06577d8a2d60 node DatanodeRegistration(127.0.0.1:45561, datanodeUuid=1bc3f713-bceb-447e-ac77-c7bcae2658f8, infoPort=35179, infoSecurePort=0, ipcPort=37897, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:43,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74ca9210{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-36437-hadoop-hdfs-3_4_1-tests_jar-_-any-6376886876352901934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:43,542 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e60aeab{HTTP/1.1, (http/1.1)}{localhost:36437} 2024-11-13T11:27:43,543 INFO [Time-limited test {}] server.Server(415): Started @105703ms 2024-11-13T11:27:43,544 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
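The DirectoryScanner warning above notes that dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above 1000 ms/sec, so the DataNode falls back to the default of -1 (throttling disabled). A minimal sketch of keeping that property in range when building a test Configuration; the 500 ms/sec value and the standalone class are illustrative assumptions, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;

public class DirectoryScannerThrottleExample {
    public static void main(String[] args) {
        // Illustrative only: keep the throttle at or below 1000 ms/sec, otherwise the
        // DataNode logs the warning seen above and falls back to -1 (unthrottled).
        Configuration conf = new Configuration();
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
        System.out.println(conf.get("dfs.datanode.directoryscan.throttle.limit.ms.per.sec"));
    }
}
```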
2024-11-13T11:27:43,621 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data3/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:43,622 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data4/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:43,644 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:27:43,646 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x138eb0d9e7bd727b with lease ID 0xefc7ddcf82d4f9e7: Processing first storage report for DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c from datanode DatanodeRegistration(127.0.0.1:33705, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=38327, infoSecurePort=0, ipcPort=43435, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:43,647 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x138eb0d9e7bd727b with lease ID 0xefc7ddcf82d4f9e7: from storage DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c node DatanodeRegistration(127.0.0.1:33705, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=38327, infoSecurePort=0, ipcPort=43435, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:43,647 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x138eb0d9e7bd727b with lease ID 0xefc7ddcf82d4f9e7: Processing first storage report for DS-a20d519f-2d5b-4f11-a5c5-b57e53a8b023 from datanode DatanodeRegistration(127.0.0.1:33705, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=38327, infoSecurePort=0, ipcPort=43435, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:43,647 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x138eb0d9e7bd727b with lease ID 0xefc7ddcf82d4f9e7: from storage DS-a20d519f-2d5b-4f11-a5c5-b57e53a8b023 node DatanodeRegistration(127.0.0.1:33705, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=38327, infoSecurePort=0, ipcPort=43435, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:43,681 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0 2024-11-13T11:27:43,684 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/zookeeper_0, clientPort=59956, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:27:43,685 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59956 2024-11-13T11:27:43,685 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:27:43,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:27:43,700 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718 with version=8 2024-11-13T11:27:43,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:27:43,703 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:27:43,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:43,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:43,703 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:27:43,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:43,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:27:43,704 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:27:43,704 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:27:43,705 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37577 2024-11-13T11:27:43,707 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37577 connecting to ZooKeeper ensemble=127.0.0.1:59956 2024-11-13T11:27:43,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:375770x0, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:27:43,711 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37577-0x10038d6deb20000 connected 2024-11-13T11:27:43,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,729 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:27:43,729 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718, hbase.cluster.distributed=false 2024-11-13T11:27:43,732 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:27:43,738 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37577 2024-11-13T11:27:43,739 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37577 2024-11-13T11:27:43,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37577 2024-11-13T11:27:43,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37577 2024-11-13T11:27:43,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37577 2024-11-13T11:27:43,758 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:27:43,758 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:27:43,759 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34477 2024-11-13T11:27:43,761 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34477 connecting to ZooKeeper ensemble=127.0.0.1:59956 2024-11-13T11:27:43,761 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,763 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:344770x0, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:27:43,767 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:344770x0, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:27:43,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34477-0x10038d6deb20001 connected 2024-11-13T11:27:43,767 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:27:43,768 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:27:43,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:27:43,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:27:43,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34477 2024-11-13T11:27:43,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34477 2024-11-13T11:27:43,778 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34477 2024-11-13T11:27:43,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34477 2024-11-13T11:27:43,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34477 2024-11-13T11:27:43,797 
DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:37577 2024-11-13T11:27:43,797 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:43,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:43,799 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:27:43,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,801 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:27:43,801 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,37577,1731497263703 from backup master directory 2024-11-13T11:27:43,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:43,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:27:43,802 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
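The ZKWatcher/ZKUtil lines above repeatedly set watchers on znodes such as /hbase/running and /hbase/master before they exist, then react to the NodeCreated and NodeChildrenChanged events that follow. A minimal sketch of the same exists-watch pattern with the plain ZooKeeper client against the ensemble from this log (127.0.0.1:59956); HBase's own ZKWatcher is internal, so this standalone class and its timings are assumptions for illustration.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatch {
    public static void main(String[] args) throws Exception {
        // Connect string and paths mirror the log above; this uses the plain ZooKeeper
        // client rather than HBase's internal ZKWatcher/ZKUtil helpers.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59956", 30000, event -> { });

        Watcher watcher = (WatchedEvent event) ->
                System.out.println("Event " + event.getType() + " on " + event.getPath());

        // exists() registers the watch even when the znode is absent, which is what the
        // "Set watcher on znode that does not yet exist" lines above rely on.
        zk.exists("/hbase/running", watcher);
        zk.exists("/hbase/master", watcher);

        Thread.sleep(5000); // wait briefly for NodeCreated events before exiting
        zk.close();
    }
}
```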
2024-11-13T11:27:43,802 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,807 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/hbase.id] with ID: 84b28292-aa6f-4831-a6b0-cf862e35c40f 2024-11-13T11:27:43,807 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/.tmp/hbase.id 2024-11-13T11:27:43,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:27:43,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:27:43,815 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/.tmp/hbase.id]:[hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/hbase.id] 2024-11-13T11:27:43,829 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:43,830 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T11:27:43,831 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-13T11:27:43,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:27:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:27:43,843 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:27:43,844 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:27:43,845 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:43,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:27:43,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:27:43,859 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store 2024-11-13T11:27:43,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:27:43,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:27:43,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:43,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:27:43,869 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:43,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:43,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:27:43,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:27:43,869 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
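The master:store descriptor above gives the 'info' family VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true' and an 8 KB block size. A minimal sketch, assuming the public HBase builder API, of how an equivalent family could be declared; the master builds this descriptor internally, so the class below is illustrative only.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreLikeDescriptor {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build();

        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info)
                .build();

        System.out.println(td);
    }
}
```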
2024-11-13T11:27:43,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497263869Disabling compacts and flushes for region at 1731497263869Disabling writes for close at 1731497263869Writing region close event to WAL at 1731497263869Closed at 1731497263869 2024-11-13T11:27:43,870 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/.initializing 2024-11-13T11:27:43,870 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,873 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C37577%2C1731497263703, suffix=, logDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703, archiveDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/oldWALs, maxLogs=10 2024-11-13T11:27:43,874 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C37577%2C1731497263703.1731497263874 2024-11-13T11:27:43,880 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 2024-11-13T11:27:43,884 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35179:35179),(127.0.0.1/127.0.0.1:38327:38327)] 2024-11-13T11:27:43,888 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:27:43,888 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:43,888 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,888 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:27:43,892 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:43,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:27:43,894 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:43,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:27:43,897 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:43,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:27:43,899 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:43,900 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,901 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,901 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,902 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,903 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,903 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:27:43,904 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:27:43,907 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:27:43,907 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767539, jitterRate=-0.02402384579181671}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:27:43,908 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497263888Initializing all the Stores at 1731497263889 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497263889Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497263890 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497263890Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497263890Cleaning up temporary data from old regions at 1731497263903 (+13 ms)Region opened successfully at 1731497263908 (+5 ms) 2024-11-13T11:27:43,908 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:27:43,911 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c5e286b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:27:43,912 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:27:43,912 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:27:43,913 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:27:43,913 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:27:43,913 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T11:27:43,914 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T11:27:43,914 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:27:43,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:27:43,917 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:27:43,918 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:27:43,918 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:27:43,918 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:27:43,919 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:27:43,919 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:27:43,920 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:27:43,921 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:27:43,922 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:27:43,923 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:27:43,925 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:27:43,925 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:27:43,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:27:43,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:27:43,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,927 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,37577,1731497263703, sessionid=0x10038d6deb20000, setting cluster-up flag (Was=false) 2024-11-13T11:27:43,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,932 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:27:43,933 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:43,939 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:27:43,940 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,37577,1731497263703 2024-11-13T11:27:43,942 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:27:43,944 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:43,945 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:27:43,945 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:27:43,945 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,37577,1731497263703 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:27:43,947 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T11:27:43,952 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497293952 2024-11-13T11:27:43,952 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:27:43,952 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:27:43,952 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:27:43,953 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:27:43,953 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:27:43,953 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:27:43,953 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:43,953 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:27:43,954 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
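Several ScheduledChore instances (LogsCleaner above, HFileCleaner and others further down) are registered through the master's ChoreService. The following is a minimal sketch of that registration pattern, assuming the stock org.apache.hadoop.hbase.ChoreService / ScheduledChore API; the chore name and 600000 ms period mirror the LogsCleaner line, and the no-op Stoppable is purely illustrative (a real master or regionserver passes itself).

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class LogsCleanerChoreSketch {
      public static void main(String[] args) {
        // Illustrative stopper; never actually stops in this sketch.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService choreService = new ChoreService("sketch");
        // period=600000, unit=MILLISECONDS, matching the LogsCleaner entry above.
        ScheduledChore logsCleaner = new ScheduledChore("LogsCleaner", stopper, 600000) {
          @Override protected void chore() {
            // cleanup of old WALs would happen here
          }
        };
        choreService.scheduleChore(logsCleaner);
        choreService.shutdown();
      }
    }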
2024-11-13T11:27:43,954 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,955 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:27:43,956 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:27:43,956 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:27:43,956 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:27:43,957 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:27:43,957 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:27:43,961 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497263957,5,FailOnTimeoutGroup] 2024-11-13T11:27:43,963 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497263961,5,FailOnTimeoutGroup] 2024-11-13T11:27:43,964 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:43,964 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:27:43,964 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:43,964 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:43,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:27:43,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:27:43,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T11:27:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:27:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:27:43,968 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:27:43,969 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718 2024-11-13T11:27:43,982 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:27:43,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:27:43,983 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:43,983 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(746): ClusterId : 84b28292-aa6f-4831-a6b0-cf862e35c40f 2024-11-13T11:27:43,983 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:27:43,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:27:43,985 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:27:43,985 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:27:43,986 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:27:43,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,987 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:27:43,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:43,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:27:43,987 DEBUG [RS:0;7bf281cf3991:34477 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a3f33f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:27:43,989 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:27:43,989 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:43,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:27:43,991 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:27:43,991 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:43,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:27:43,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:27:43,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:43,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:43,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:27:43,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740 2024-11-13T11:27:43,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740 2024-11-13T11:27:43,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:27:43,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:27:43,997 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
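FlushLargeStoresPolicy falls back to memstore-flush-heap-size divided by the number of column families (four families here, hence the 16.0 M value for hbase:meta and 32.0 M earlier for master:store) whenever hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor. Below is a hedged sketch of setting that property explicitly when building a table descriptor; the table name, family name, and 32 MB value are illustrative, and the property key is taken from the log line above.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static TableDescriptor build() {
        // 32 MB lower bound; families whose memstore is below this are skipped by the flush policy.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(32L * 1024 * 1024))
            .build();
      }
    }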
2024-11-13T11:27:43,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:27:44,001 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:27:44,001 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720809, jitterRate=-0.08344447612762451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:27:44,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497263983Initializing all the Stores at 1731497263984 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497263984Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497263985 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497263985Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497263985Cleaning up temporary data from old regions at 1731497263997 (+12 ms)Region opened successfully at 1731497264002 (+5 ms) 2024-11-13T11:27:44,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:27:44,002 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:27:44,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:27:44,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:27:44,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:27:44,003 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:27:44,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497264002Disabling compacts and flushes for region at 1731497264002Disabling writes for close at 1731497264002Writing 
region close event to WAL at 1731497264003 (+1 ms)Closed at 1731497264003 2024-11-13T11:27:44,004 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:34477 2024-11-13T11:27:44,004 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:27:44,004 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:27:44,004 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T11:27:44,004 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:44,005 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:27:44,005 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,37577,1731497263703 with port=34477, startcode=1731497263757 2024-11-13T11:27:44,005 DEBUG [RS:0;7bf281cf3991:34477 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:27:44,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:27:44,007 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:27:44,007 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49849, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:27:44,008 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,008 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37577 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,009 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:27:44,009 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718 2024-11-13T11:27:44,010 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35763 2024-11-13T11:27:44,010 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:27:44,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:27:44,012 DEBUG 
[RS:0;7bf281cf3991:34477 {}] zookeeper.ZKUtil(111): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,012 WARN [RS:0;7bf281cf3991:34477 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:27:44,012 INFO [RS:0;7bf281cf3991:34477 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:44,012 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,014 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,34477,1731497263757] 2024-11-13T11:27:44,021 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:27:44,024 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:27:44,025 INFO [RS:0;7bf281cf3991:34477 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:27:44,025 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,029 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:27:44,033 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:27:44,033 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
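The regionserver lines above derive globalMemStoreLimit (880 M) from the configured heap fraction with the low-water mark at 95% of it (836 M), and build the compaction throughput controller from its 50-100 MB/second bounds. A sketch of the corresponding Configuration overrides follows; the key names are the ones conventionally used for these components and should be treated as assumptions, not values read back from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap usable by all memstores (drives globalMemStoreLimit).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Lower/upper bounds for the pressure-aware compaction throughput controller, bytes/sec.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
      }
    }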
2024-11-13T11:27:44,033 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:27:44,034 DEBUG [RS:0;7bf281cf3991:34477 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:27:44,041 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,041 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,041 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,041 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
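Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above is a fixed-size worker pool dedicated to a single event type (open region, close region, log replay, snapshot operations, and so on). The sketch below uses a plain JDK ThreadPoolExecutor to show the same core/max sizing pattern; it is a generic illustration, not HBase's internal executor.ExecutorService.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class EventExecutorSketch {
      public static ThreadPoolExecutor snapshotOperationsPool() {
        // corePoolSize=3, maxPoolSize=3: a fixed pool, as in RS_SNAPSHOT_OPERATIONS above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // optionally let idle workers exit
        return pool;
      }
    }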
2024-11-13T11:27:44,042 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,042 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34477,1731497263757-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:27:44,059 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:27:44,059 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34477,1731497263757-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,060 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,060 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.Replication(171): 7bf281cf3991,34477,1731497263757 started 2024-11-13T11:27:44,076 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,077 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,34477,1731497263757, RpcServer on 7bf281cf3991/172.17.0.2:34477, sessionid=0x10038d6deb20001 2024-11-13T11:27:44,077 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:27:44,077 DEBUG [RS:0;7bf281cf3991:34477 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,077 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,34477,1731497263757' 2024-11-13T11:27:44,077 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:27:44,078 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:27:44,078 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:27:44,078 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:27:44,078 DEBUG [RS:0;7bf281cf3991:34477 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,078 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,34477,1731497263757' 2024-11-13T11:27:44,078 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:27:44,079 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:27:44,079 DEBUG [RS:0;7bf281cf3991:34477 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:27:44,079 INFO [RS:0;7bf281cf3991:34477 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:27:44,079 INFO [RS:0;7bf281cf3991:34477 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-13T11:27:44,159 WARN [7bf281cf3991:37577 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T11:27:44,182 INFO [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C34477%2C1731497263757, suffix=, logDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757, archiveDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs, maxLogs=32 2024-11-13T11:27:44,182 INFO [RS:0;7bf281cf3991:34477 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.1731497264182 2024-11-13T11:27:44,189 INFO [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 2024-11-13T11:27:44,193 DEBUG [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38327:38327),(127.0.0.1/127.0.0.1:35179:35179)] 2024-11-13T11:27:44,410 DEBUG [7bf281cf3991:37577 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:27:44,411 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,414 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,34477,1731497263757, state=OPENING 2024-11-13T11:27:44,416 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:27:44,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:44,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:27:44,419 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:27:44,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:44,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:44,419 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,34477,1731497263757}] 2024-11-13T11:27:44,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:44,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:44,576 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:27:44,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56379, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:27:44,588 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:27:44,588 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:44,592 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C34477%2C1731497263757.meta, suffix=.meta, logDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757, archiveDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs, maxLogs=32 2024-11-13T11:27:44,594 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta 2024-11-13T11:27:44,603 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta 2024-11-13T11:27:44,606 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35179:35179),(127.0.0.1/127.0.0.1:38327:38327)] 2024-11-13T11:27:44,609 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:27:44,610 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:27:44,610 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:27:44,610 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
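hbase:meta is opened above with the MultiRowMutationEndpoint coprocessor attached at priority 536870911, and the open path re-loads that class from the table descriptor (HTD). A minimal sketch of attaching a coprocessor class to a table descriptor via TableDescriptorBuilder.setCoprocessor; the table and family names are illustrative.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static TableDescriptor build() throws java.io.IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same endpoint class the log shows being loaded for hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }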
2024-11-13T11:27:44,610 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:27:44,610 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:44,610 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:27:44,610 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:27:44,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:27:44,613 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:27:44,613 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:44,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:44,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:27:44,615 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:27:44,615 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:44,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:44,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:27:44,616 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:27:44,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:44,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:27:44,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:27:44,618 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:27:44,618 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:44,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
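The meta column families are instantiated above with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching, and (for 'info') an 8 KB block size. A sketch of declaring a column family with those same attributes through ColumnFamilyDescriptorBuilder; the family name and values are copied from the descriptor text above.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilySketch {
      public static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                // VERSIONS => '3'
            .setInMemory(true)                                // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                           // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();
      }
    }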
2024-11-13T11:27:44,619 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:27:44,620 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740 2024-11-13T11:27:44,621 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740 2024-11-13T11:27:44,622 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:27:44,622 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:27:44,623 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:27:44,624 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:27:44,625 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875998, jitterRate=0.11388939619064331}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:27:44,626 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:27:44,626 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497264611Writing region info on filesystem at 1731497264611Initializing all the Stores at 1731497264612 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497264612Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497264612Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497264612Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497264612Cleaning up temporary data from old regions at 1731497264622 (+10 ms)Running coprocessor post-open hooks at 1731497264626 (+4 ms)Region opened successfully at 1731497264626 2024-11-13T11:27:44,628 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497264575 2024-11-13T11:27:44,630 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:27:44,630 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:27:44,631 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,632 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,34477,1731497263757, state=OPEN 2024-11-13T11:27:44,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:27:44,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:27:44,635 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,34477,1731497263757 2024-11-13T11:27:44,635 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:44,635 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:27:44,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:27:44,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,34477,1731497263757 in 216 msec 2024-11-13T11:27:44,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:27:44,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 633 msec 2024-11-13T11:27:44,643 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:27:44,643 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:27:44,645 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:27:44,645 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,34477,1731497263757, seqNum=-1] 2024-11-13T11:27:44,645 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:27:44,646 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41853, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:27:44,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 708 msec 2024-11-13T11:27:44,654 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497264654, completionTime=-1 2024-11-13T11:27:44,654 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:27:44,654 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497324656 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497384656 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,37577,1731497263703-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,37577,1731497263703-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,37577,1731497263703-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,656 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:37577, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:44,657 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,657 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,659 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:27:44,661 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.859sec 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,37577,1731497263703-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:27:44,662 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,37577,1731497263703-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:27:44,664 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:27:44,664 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:27:44,665 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,37577,1731497263703-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
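The records above show the single-node mini-cluster reaching a fully initialized state: hbase:meta assigned and opened, master initialization complete, and the periodic chores scheduled. For reference, a minimal sketch of starting such a cluster programmatically; it assumes the 2.x-style HBaseTestingUtility API rather than the exact harness class used in this run, and is illustrative only.

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    // Start ZooKeeper, a mini-DFS, one master and one regionserver in-process.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // At this point the cluster is in the state logged above:
      // hbase:meta is open and master initialization has completed.
      System.out.println("Active master: "
          + util.getMiniHBaseCluster().getMaster().getServerName());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}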
2024-11-13T11:27:44,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a802dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:27:44,683 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,37577,-1 for getting cluster id 2024-11-13T11:27:44,683 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:27:44,685 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '84b28292-aa6f-4831-a6b0-cf862e35c40f' 2024-11-13T11:27:44,685 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:27:44,686 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "84b28292-aa6f-4831-a6b0-cf862e35c40f" 2024-11-13T11:27:44,686 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3da31ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:27:44,686 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,37577,-1] 2024-11-13T11:27:44,686 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:27:44,687 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:27:44,688 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36400, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:27:44,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d092a64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:27:44,689 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:27:44,690 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,34477,1731497263757, seqNum=-1] 2024-11-13T11:27:44,690 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:27:44,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50206, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:27:44,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7bf281cf3991,37577,1731497263703 2024-11-13T11:27:44,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:44,696 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:27:44,714 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:27:44,714 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:27:44,715 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38159 2024-11-13T11:27:44,717 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38159 connecting to ZooKeeper ensemble=127.0.0.1:59956 2024-11-13T11:27:44,717 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:44,719 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:27:44,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381590x0, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:27:44,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:381590x0, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-13T11:27:44,723 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-13T11:27:44,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38159-0x10038d6deb20002 connected 2024-11-13T11:27:44,724 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:27:44,724 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:27:44,725 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:27:44,726 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:27:44,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38159 2024-11-13T11:27:44,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38159 2024-11-13T11:27:44,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38159 2024-11-13T11:27:44,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38159 2024-11-13T11:27:44,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38159 2024-11-13T11:27:44,753 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(746): ClusterId : 84b28292-aa6f-4831-a6b0-cf862e35c40f 2024-11-13T11:27:44,753 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:27:44,755 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:27:44,755 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:27:44,756 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:27:44,757 DEBUG [RS:1;7bf281cf3991:38159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@712a7604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:27:44,770 DEBUG [RS:1;7bf281cf3991:38159 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7bf281cf3991:38159 2024-11-13T11:27:44,770 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:27:44,770 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:27:44,770 DEBUG [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T11:27:44,771 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,37577,1731497263703 with port=38159, startcode=1731497264714 2024-11-13T11:27:44,771 DEBUG [RS:1;7bf281cf3991:38159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:27:44,773 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:27:44,774 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,38159,1731497264714 2024-11-13T11:27:44,774 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37577 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,38159,1731497264714 2024-11-13T11:27:44,775 DEBUG [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718 2024-11-13T11:27:44,775 DEBUG [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35763 2024-11-13T11:27:44,775 DEBUG [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:27:44,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:27:44,777 DEBUG [RS:1;7bf281cf3991:38159 {}] zookeeper.ZKUtil(111): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,38159,1731497264714 2024-11-13T11:27:44,777 WARN [RS:1;7bf281cf3991:38159 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:27:44,777 INFO [RS:1;7bf281cf3991:38159 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:27:44,777 DEBUG [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714 2024-11-13T11:27:44,777 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,38159,1731497264714] 2024-11-13T11:27:44,781 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:27:44,782 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:27:44,783 INFO [RS:1;7bf281cf3991:38159 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:27:44,783 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:44,784 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:27:44,785 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:27:44,785 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:27:44,785 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:27:44,786 DEBUG [RS:1;7bf281cf3991:38159 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:27:44,786 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T11:27:44,786 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,786 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,786 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,787 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,787 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,38159,1731497264714-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:27:44,802 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:27:44,802 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,38159,1731497264714-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,802 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,802 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.Replication(171): 7bf281cf3991,38159,1731497264714 started 2024-11-13T11:27:44,815 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:27:44,815 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,38159,1731497264714, RpcServer on 7bf281cf3991/172.17.0.2:38159, sessionid=0x10038d6deb20002 2024-11-13T11:27:44,815 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:27:44,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;7bf281cf3991:38159,5,FailOnTimeoutGroup] 2024-11-13T11:27:44,815 DEBUG [RS:1;7bf281cf3991:38159 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,38159,1731497264714 2024-11-13T11:27:44,816 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,38159,1731497264714' 2024-11-13T11:27:44,816 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:27:44,816 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-13T11:27:44,816 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T11:27:44,816 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:27:44,817 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:27:44,817 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:27:44,817 DEBUG [RS:1;7bf281cf3991:38159 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
7bf281cf3991,38159,1731497264714 2024-11-13T11:27:44,817 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,38159,1731497264714' 2024-11-13T11:27:44,817 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:27:44,817 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:27:44,818 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 7bf281cf3991,37577,1731497263703 2024-11-13T11:27:44,818 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@69d33494 2024-11-13T11:27:44,818 DEBUG [RS:1;7bf281cf3991:38159 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:27:44,818 INFO [RS:1;7bf281cf3991:38159 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:27:44,818 INFO [RS:1;7bf281cf3991:38159 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T11:27:44,818 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T11:27:44,820 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36414, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T11:27:44,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T11:27:44,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
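The two TableDescriptorChecker warnings above are expected in this context: log-rolling tests deliberately use very small region and memstore sizes so that flushes and splits happen quickly. A hedged sketch of setting those same values follows; whether the test applies them through the Configuration (as below) or directly on the table descriptor is not shown in this log, and the class wrapper is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The two warnings above correspond to these keys and values; a log-rolling
    // test would set them this low on purpose to force frequent flushing and splitting.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    System.out.println("max.filesize=" + conf.get("hbase.hregion.max.filesize")
        + ", flush.size=" + conf.get("hbase.hregion.memstore.flush.size"));
  }
}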
2024-11-13T11:27:44,821 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:27:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T11:27:44,824 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T11:27:44,824 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:44,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-13T11:27:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:27:44,825 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T11:27:44,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741835_1011 (size=393) 2024-11-13T11:27:44,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741835_1011 (size=393) 2024-11-13T11:27:44,834 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c670657098f880144481eec91e517476, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718 2024-11-13T11:27:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45561 is added to blk_1073741836_1012 (size=76) 2024-11-13T11:27:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33705 is added to blk_1073741836_1012 (size=76) 2024-11-13T11:27:44,841 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:44,841 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing c670657098f880144481eec91e517476, disabling compactions & flushes 2024-11-13T11:27:44,841 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:44,841 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:44,841 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. after waiting 0 ms 2024-11-13T11:27:44,841 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:44,841 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:44,841 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for c670657098f880144481eec91e517476: Waiting for close lock at 1731497264841Disabling compacts and flushes for region at 1731497264841Disabling writes for close at 1731497264841Writing region close event to WAL at 1731497264841Closed at 1731497264841 2024-11-13T11:27:44,843 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T11:27:44,843 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731497264843"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497264843"}]},"ts":"1731497264843"} 2024-11-13T11:27:44,845 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
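The CreateTableProcedure records above correspond to a client-side createTable call for 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' column family. A minimal illustrative sketch of such a call is below; only the table and family names come from the log, while the connection setup and class wrapper are assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    // Connects using whatever hbase-site.xml is on the classpath.
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      // Issues the same kind of create request that produced pid=4 above and
      // blocks until the CreateTableProcedure finishes.
      admin.createTable(desc);
    }
  }
}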
2024-11-13T11:27:44,846 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T11:27:44,847 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497264847"}]},"ts":"1731497264847"} 2024-11-13T11:27:44,849 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-13T11:27:44,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c670657098f880144481eec91e517476, ASSIGN}] 2024-11-13T11:27:44,851 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c670657098f880144481eec91e517476, ASSIGN 2024-11-13T11:27:44,852 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c670657098f880144481eec91e517476, ASSIGN; state=OFFLINE, location=7bf281cf3991,34477,1731497263757; forceNewPlan=false, retain=false 2024-11-13T11:27:44,923 INFO [RS:1;7bf281cf3991:38159 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C38159%2C1731497264714, suffix=, logDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714, archiveDir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs, maxLogs=32 2024-11-13T11:27:44,925 INFO [RS:1;7bf281cf3991:38159 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C38159%2C1731497264714.1731497264924 2024-11-13T11:27:44,933 INFO [RS:1;7bf281cf3991:38159 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 2024-11-13T11:27:44,935 DEBUG [RS:1;7bf281cf3991:38159 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35179:35179),(127.0.0.1/127.0.0.1:38327:38327)] 2024-11-13T11:27:44,965 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:27:44,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:44,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:44,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:44,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:45,003 INFO [7bf281cf3991:37577 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T11:27:45,003 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c670657098f880144481eec91e517476, regionState=OPENING, regionLocation=7bf281cf3991,34477,1731497263757 2024-11-13T11:27:45,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c670657098f880144481eec91e517476, ASSIGN because future has completed 2024-11-13T11:27:45,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c670657098f880144481eec91e517476, server=7bf281cf3991,34477,1731497263757}] 2024-11-13T11:27:45,164 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:45,165 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c670657098f880144481eec91e517476, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:27:45,166 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath c670657098f880144481eec91e517476 2024-11-13T11:27:45,166 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:27:45,166 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c670657098f880144481eec91e517476 2024-11-13T11:27:45,166 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c670657098f880144481eec91e517476 2024-11-13T11:27:45,168 INFO [StoreOpener-c670657098f880144481eec91e517476-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c670657098f880144481eec91e517476 2024-11-13T11:27:45,170 INFO [StoreOpener-c670657098f880144481eec91e517476-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c670657098f880144481eec91e517476 columnFamilyName info 2024-11-13T11:27:45,171 DEBUG [StoreOpener-c670657098f880144481eec91e517476-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:27:45,171 INFO [StoreOpener-c670657098f880144481eec91e517476-1 {}] regionserver.HStore(327): Store=c670657098f880144481eec91e517476/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:27:45,172 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c670657098f880144481eec91e517476 2024-11-13T11:27:45,173 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476 2024-11-13T11:27:45,173 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476 2024-11-13T11:27:45,174 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c670657098f880144481eec91e517476 2024-11-13T11:27:45,174 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c670657098f880144481eec91e517476 2024-11-13T11:27:45,176 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c670657098f880144481eec91e517476 2024-11-13T11:27:45,178 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:27:45,179 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c670657098f880144481eec91e517476; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762339, jitterRate=-0.030636325478553772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:27:45,179 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c670657098f880144481eec91e517476 
2024-11-13T11:27:45,179 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c670657098f880144481eec91e517476: Running coprocessor pre-open hook at 1731497265166Writing region info on filesystem at 1731497265166Initializing all the Stores at 1731497265168 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497265168Cleaning up temporary data from old regions at 1731497265174 (+6 ms)Running coprocessor post-open hooks at 1731497265179 (+5 ms)Region opened successfully at 1731497265179 2024-11-13T11:27:45,180 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476., pid=6, masterSystemTime=1731497265159 2024-11-13T11:27:45,183 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:45,183 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:45,184 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c670657098f880144481eec91e517476, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,34477,1731497263757 2024-11-13T11:27:45,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c670657098f880144481eec91e517476, server=7bf281cf3991,34477,1731497263757 because future has completed 2024-11-13T11:27:45,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T11:27:45,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c670657098f880144481eec91e517476, server=7bf281cf3991,34477,1731497263757 in 182 msec 2024-11-13T11:27:45,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T11:27:45,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c670657098f880144481eec91e517476, ASSIGN in 341 msec 2024-11-13T11:27:45,194 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T11:27:45,195 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497265195"}]},"ts":"1731497265195"} 2024-11-13T11:27:45,197 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-13T11:27:45,199 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T11:27:45,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 378 msec 2024-11-13T11:27:50,116 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:27:50,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:50,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:50,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:50,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:27:50,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-13T11:27:53,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:27:53,965 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T11:27:53,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T11:27:53,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-13T11:27:53,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:27:53,967 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T11:27:54,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:27:54,901 INFO 
[RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-13T11:27:54,901 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-13T11:27:54,906 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T11:27:54,906 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:27:54,921 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:54,924 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:54,925 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:54,925 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:54,925 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:27:54,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@365affa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:54,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19c16326{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:55,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@411972d6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-35009-hadoop-hdfs-3_4_1-tests_jar-_-any-17179191537254146022/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:55,022 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ffd409{HTTP/1.1, (http/1.1)}{localhost:35009} 2024-11-13T11:27:55,022 INFO [Time-limited test {}] server.Server(415): Started @117183ms 2024-11-13T11:27:55,023 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:27:55,054 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:55,058 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:55,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:55,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:55,059 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:27:55,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e942ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:55,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6836669f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:55,083 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data5/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:55,083 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data6/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:55,104 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:27:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x769c079498eead with lease ID 0xefc7ddcf82d4f9e8: Processing first storage report for DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65 from datanode DatanodeRegistration(127.0.0.1:45033, datanodeUuid=6bdf1212-1246-456d-913a-4423cf8745fb, infoPort=39425, infoSecurePort=0, ipcPort=42845, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x769c079498eead with lease ID 0xefc7ddcf82d4f9e8: from storage DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65 node DatanodeRegistration(127.0.0.1:45033, datanodeUuid=6bdf1212-1246-456d-913a-4423cf8745fb, infoPort=39425, infoSecurePort=0, ipcPort=42845, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x769c079498eead with lease ID 0xefc7ddcf82d4f9e8: Processing first storage report for DS-aa7c3efd-31d1-4856-bb45-33d4da6f5f5f from datanode DatanodeRegistration(127.0.0.1:45033, datanodeUuid=6bdf1212-1246-456d-913a-4423cf8745fb, infoPort=39425, infoSecurePort=0, ipcPort=42845, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x769c079498eead with lease ID 0xefc7ddcf82d4f9e8: from storage DS-aa7c3efd-31d1-4856-bb45-33d4da6f5f5f node DatanodeRegistration(127.0.0.1:45033, datanodeUuid=6bdf1212-1246-456d-913a-4423cf8745fb, infoPort=39425, infoSecurePort=0, ipcPort=42845, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:55,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e04641a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-36297-hadoop-hdfs-3_4_1-tests_jar-_-any-16363270265703956204/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:55,162 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c24d977{HTTP/1.1, (http/1.1)}{localhost:36297} 2024-11-13T11:27:55,162 INFO [Time-limited test {}] server.Server(415): Started @117322ms 2024-11-13T11:27:55,163 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:27:55,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:27:55,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:27:55,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:27:55,202 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:27:55,202 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:27:55,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@110505ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:27:55,203 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45910ffe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:27:55,225 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data8/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:55,225 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data7/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:55,243 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:27:55,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d03d6b75acfaeb with lease ID 0xefc7ddcf82d4f9e9: Processing first storage report for DS-830be915-8237-433c-9353-ddc0a0f05e0e from datanode DatanodeRegistration(127.0.0.1:32897, datanodeUuid=7945fe28-0dcd-43f5-9a19-284b7dda4cd2, infoPort=40427, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:55,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d03d6b75acfaeb with lease ID 0xefc7ddcf82d4f9e9: from storage DS-830be915-8237-433c-9353-ddc0a0f05e0e node DatanodeRegistration(127.0.0.1:32897, datanodeUuid=7945fe28-0dcd-43f5-9a19-284b7dda4cd2, infoPort=40427, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:55,246 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d03d6b75acfaeb with lease ID 0xefc7ddcf82d4f9e9: Processing first storage report for DS-5e93c4a5-040c-4987-b00c-2175efd632a7 from datanode DatanodeRegistration(127.0.0.1:32897, datanodeUuid=7945fe28-0dcd-43f5-9a19-284b7dda4cd2, infoPort=40427, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:55,246 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d03d6b75acfaeb with lease ID 0xefc7ddcf82d4f9e9: from storage DS-5e93c4a5-040c-4987-b00c-2175efd632a7 node DatanodeRegistration(127.0.0.1:32897, datanodeUuid=7945fe28-0dcd-43f5-9a19-284b7dda4cd2, infoPort=40427, infoSecurePort=0, ipcPort=40667, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:55,309 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c0171f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-43983-hadoop-hdfs-3_4_1-tests_jar-_-any-5941464598701492953/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:55,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@602792b0{HTTP/1.1, (http/1.1)}{localhost:43983} 2024-11-13T11:27:55,310 INFO [Time-limited test {}] server.Server(415): Started @117470ms 2024-11-13T11:27:55,311 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
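The block reports and Jetty startups above (info servers on localhost:35009, localhost:36297 and localhost:43983, backed by storage directories data5 through data10 of block pool BP-1464557415-172.17.0.2-1731497263081) show three additional datanodes joining the mini DFS cluster before any node is taken down, so the open WAL pipelines will have healthy replicas to fall back on. A minimal sketch of how a test can grow a running MiniDFSCluster this way follows; it uses the public MiniDFSCluster API and is illustrative, not the actual TestLogRolling code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GrowMiniClusterSketch {
  // Adds extra datanodes to an already running mini cluster and waits until they
  // have registered and sent their first block reports (the "Processing first
  // storage report" entries above).
  static void addDataNodes(MiniDFSCluster dfsCluster, Configuration conf, int count) throws Exception {
    // startDataNodes(conf, numDataNodes, manageDfsDirs, startupOption, racks)
    dfsCluster.startDataNodes(conf, count, true, null, null);
    dfsCluster.waitActive();
  }
}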
2024-11-13T11:27:55,369 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:55,369 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10/current/BP-1464557415-172.17.0.2-1731497263081/current, will proceed with Du for space computation calculation, 2024-11-13T11:27:55,388 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:27:55,391 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcefd4e0ac8c66855 with lease ID 0xefc7ddcf82d4f9ea: Processing first storage report for DS-79dffad5-8135-4511-bcb2-8d51955d404d from datanode DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:55,391 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcefd4e0ac8c66855 with lease ID 0xefc7ddcf82d4f9ea: from storage DS-79dffad5-8135-4511-bcb2-8d51955d404d node DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:55,391 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcefd4e0ac8c66855 with lease ID 0xefc7ddcf82d4f9ea: Processing first storage report for DS-cfd10a24-78cd-4cf4-a634-a12efa39f209 from datanode DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081) 2024-11-13T11:27:55,391 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcefd4e0ac8c66855 with lease ID 0xefc7ddcf82d4f9ea: from storage DS-cfd10a24-78cd-4cf4-a634-a12efa39f209 node DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:27:55,431 WARN [ResponseProcessor for block BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,431 WARN [ResponseProcessor for block BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,431 WARN [ResponseProcessor for block BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,432 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:27:55,432 WARN [ResponseProcessor for block BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,432 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 
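From 11:27:55,431 onward the client-side ResponseProcessor threads report EOFs and "Bad response ERROR" for the open WAL blocks, and each DataStreamer marks datanode 127.0.0.1:33705 as bad and begins pipeline error recovery. That is the expected client reaction when one node of the write pipeline disappears mid-write; in a mini-cluster test this is typically provoked by stopping a datanode directly, roughly as in the hypothetical sketch below (the index and names are illustrative, not taken from this run).

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

public class KillDataNodeSketch {
  // Stops one datanode underneath live writers to simulate a datanode death;
  // the returned properties would allow the node to be restarted later if needed.
  static DataNodeProperties killOne(MiniDFSCluster dfsCluster, int index) throws Exception {
    DataNodeProperties stopped = dfsCluster.stopDataNode(index);
    // dfsCluster.restartDataNode(stopped, true); // optional: bring it back later
    return stopped;
  }
}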
2024-11-13T11:27:55,432 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta block BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:27:55,432 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:27:55,432 WARN [PacketResponder: BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33705] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,432 WARN [PacketResponder: BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33705] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,433 WARN [PacketResponder: BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33705] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
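Whether the DFS client asks for a replacement datanode during the error recovery shown above, or simply continues on the surviving replicas, is governed by the standard replace-datanode-on-failure client settings. The keys in the sketch below are real HDFS client configuration keys, but the values are illustrative; the values actually in effect for this run are not visible in the log.

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConfSketch {
  static Configuration pipelineRecoveryConf() {
    Configuration conf = new Configuration();
    // Allow the client to ask the NameNode for a replacement datanode when a
    // pipeline node fails...
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // ...but only under the default policy (replace when the pipeline would
    // otherwise fall below a safe number of replicas).
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    return conf;
  }
}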
2024-11-13T11:27:55,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:43846 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43846 dst: /127.0.0.1:45561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74ca9210{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:55,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1589112119_22 at /127.0.0.1:43804 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43804 dst: /127.0.0.1:45561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:27:55,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2072322438_22 at /127.0.0.1:43864 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43864 dst: /127.0.0.1:45561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:39678 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39678 dst: /127.0.0.1:33705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:39662 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39662 dst: /127.0.0.1:33705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:27:55,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:43830 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43830 dst: /127.0.0.1:45561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e60aeab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:55,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:55,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1589112119_22 at /127.0.0.1:39650 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39650 dst: /127.0.0.1:33705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40fc6fac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:55,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18c619fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:55,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2072322438_22 at /127.0.0.1:39702 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39702 dst: /127.0.0.1:33705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:55,439 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:55,439 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:27:55,439 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1464557415-172.17.0.2-1731497263081 (Datanode Uuid d6fd65b9-3d25-4348-bf57-ab1173ebd30b) service to localhost/127.0.0.1:35763 2024-11-13T11:27:55,439 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:55,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data3/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:55,440 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data4/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:55,440 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:55,441 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta block BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,441 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,441 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:27:55,441 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@742046cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:55,443 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@77298c30{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:55,443 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:55,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e8610c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:55,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@808171a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:55,444 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:55,444 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
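A few entries further on, appendAndSync on the region server's WAL fails with "All datanodes ... are bad", the log roller rolls to a new WAL file on the surviving datanodes, and RecoverLeaseFSUtils begins recovering the lease on the old file that could not be closed cleanly. A hedged sketch of that last step, using the plain DistributedFileSystem lease-recovery calls that the log's LeaseRecoverable reference resolves to, is shown below; the polling loop is illustrative and not HBase's exact retry/backoff logic.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Asks the NameNode to recover the lease on the old WAL and polls until the
  // file is closed; while recovery is pending the NameNode reports
  // "Lease recovery is in progress", as seen further down in this log.
  static void recoverOldWalLease(DistributedFileSystem dfs, Path oldWal) throws Exception {
    boolean closed = dfs.recoverLease(oldWal);
    while (!closed) {
      Thread.sleep(1000);            // simple fixed poll interval (illustrative)
      closed = dfs.isFileClosed(oldWal);
    }
  }
}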
2024-11-13T11:27:55,444 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1464557415-172.17.0.2-1731497263081 (Datanode Uuid 1bc3f713-bceb-447e-ac77-c7bcae2658f8) service to localhost/127.0.0.1:35763 2024-11-13T11:27:55,444 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:55,445 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data1/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:55,445 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data2/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:55,445 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:55,451 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476., hostname=7bf281cf3991,34477,1731497263757, seqNum=2] 2024-11-13T11:27:55,453 ERROR [FSHLog-0-hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718-prefix:7bf281cf3991,34477,1731497263757 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,453 WARN [FSHLog-0-hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718-prefix:7bf281cf3991,34477,1731497263757 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,453 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,453 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34477%2C1731497263757:(num 1731497264182) roll requested 2024-11-13T11:27:55,454 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.1731497275453 2024-11-13T11:27:55,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:55,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:55,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:55,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:55,459 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:55,460 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497275453 2024-11-13T11:27:55,460 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,460 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:55,460 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40427:40427),(127.0.0.1/127.0.0.1:39425:39425)] 2024-11-13T11:27:55,460 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 2024-11-13T11:27:55,461 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-13T11:27:55,462 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-13T11:27:55,462 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 2024-11-13T11:27:55,465 WARN [IPC Server handler 3 on default port 35763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-13T11:27:55,469 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 after 5ms 2024-11-13T11:27:55,846 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:56,787 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:57,461 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:57,462 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497275453 2024-11-13T11:27:57,463 WARN [ResponseProcessor for block BP-1464557415-172.17.0.2-1731497263081:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1464557415-172.17.0.2-1731497263081:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:57,464 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497275453 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 2024-11-13T11:27:57,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47010 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:32897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47010 dst: /127.0.0.1:32897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:27:57,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:45314 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:45033:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45314 dst: /127.0.0.1:45033 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:27:57,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e04641a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:27:57,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c24d977{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:27:57,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:27:57,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6836669f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:27:57,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e942ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:27:57,471 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:27:57,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:27:57,471 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1464557415-172.17.0.2-1731497263081 (Datanode Uuid 7945fe28-0dcd-43f5-9a19-284b7dda4cd2) service to localhost/127.0.0.1:35763 2024-11-13T11:27:57,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:27:57,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data7/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:57,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data8/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:27:57,472 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:27:57,847 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:58,788 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:59,461 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:59,462 WARN [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]] 2024-11-13T11:27:59,462 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34477%2C1731497263757:(num 1731497275453) roll requested 2024-11-13T11:27:59,462 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.1731497279462 2024-11-13T11:27:59,466 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:59,467 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:27:59,467 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741839_1021 2024-11-13T11:27:59,471 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 after 4009ms 2024-11-13T11:27:59,471 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:27:59,475 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:59,476 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 
2024-11-13T11:27:59,476 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741840_1022 2024-11-13T11:27:59,476 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:27:59,477 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T11:27:59,478 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33705 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:59,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:53940 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data6]'}, localName='127.0.0.1:45033', datanodeUuid='6bdf1212-1246-456d-913a-4423cf8745fb', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741841_1023 to mirror 127.0.0.1:33705 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:27:59,479 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:27:59,479 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741841_1023 2024-11-13T11:27:59,479 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:53940 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T11:27:59,479 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:53940 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:45033:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53940 dst: /127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:27:59,479 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:27:59,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:59,483 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:59,483 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:59,483 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:59,483 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:27:59,484 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497275453 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497279462 2024-11-13T11:27:59,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45033 is added to blk_1073741838_1020 (size=3600) 2024-11-13T11:27:59,489 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39425:39425),(127.0.0.1/127.0.0.1:33291:33291)] 2024-11-13T11:27:59,489 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 2024-11-13T11:27:59,489 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497275453 is not closed yet, will try archiving it next time 2024-11-13T11:27:59,847 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:27:59,887 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 2024-11-13T11:28:00,789 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,481 WARN [ResponseProcessor for block BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,481 WARN [DataStreamer for file /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497279462 block BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:01,483 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47900 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47900 dst: /127.0.0.1:44657 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:01,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:53950 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:45033:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53950 dst: /127.0.0.1:45033 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:01,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@411972d6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:01,484 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ffd409{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:01,484 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:01,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19c16326{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:01,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@365affa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:01,486 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:01,486 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:28:01,486 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1464557415-172.17.0.2-1731497263081 (Datanode Uuid 6bdf1212-1246-456d-913a-4423cf8745fb) service to localhost/127.0.0.1:35763 2024-11-13T11:28:01,487 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:01,487 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data5/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:01,487 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data6/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:01,488 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:01,489 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,490 WARN [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]] 2024-11-13T11:28:01,490 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34477%2C1731497263757:(num 1731497279462) roll requested 2024-11-13T11:28:01,490 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.1731497281490 2024-11-13T11:28:01,493 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,494 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:28:01,494 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741843_1026 2024-11-13T11:28:01,495 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:01,496 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,497 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:01,497 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741844_1027 2024-11-13T11:28:01,498 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:01,499 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,499 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 
2024-11-13T11:28:01,499 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741845_1028 2024-11-13T11:28:01,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34477 {}] regionserver.HRegion(8855): Flush requested on c670657098f880144481eec91e517476 2024-11-13T11:28:01,501 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:01,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c670657098f880144481eec91e517476 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:28:01,508 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47926 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741846_1029 to mirror 127.0.0.1:32897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:01,508 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 2024-11-13T11:28:01,508 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741846_1029 2024-11-13T11:28:01,508 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47926 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T11:28:01,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47926 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47926 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:01,509 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:01,510 WARN [IPC Server handler 1 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:01,510 WARN [IPC Server handler 1 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:01,510 WARN [IPC Server handler 1 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:01,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:01,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:01,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:01,527 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:01,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/b76699813bd0477da95d452759d8a8b2 is 1080, key is row0002/info:/1731497277474/Put/seqid=0 2024-11-13T11:28:01,527 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:01,527 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497279462 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497281490 2024-11-13T11:28:01,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741842_1025 (size=13274) 2024-11-13T11:28:01,529 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,529 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:01,529 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741848_1031 2024-11-13T11:28:01,530 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:01,531 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,531 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:01,531 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741849_1032 2024-11-13T11:28:01,532 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:01,533 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,534 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:28:01,534 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741850_1033 2024-11-13T11:28:01,534 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:01,536 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,536 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 
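The BlockPlacementPolicyDefault(501) warnings above ("Failed to place enough replicas ... please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology") name the two loggers to raise if the placement decisions need to be inspected. A minimal sketch of doing that programmatically, assuming the Log4j2 core API is on the test classpath and the placement runs in the same JVM (as it would with an in-process mini cluster); editing the test's Log4j2 configuration to the same effect is the other option:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnableBlockPlacementDebug {
        public static void main(String[] args) {
            // Raise exactly the two loggers named in the warning to DEBUG so the
            // placement policy explains why each replica attempt was rejected.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }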
2024-11-13T11:28:01,536 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741851_1034 2024-11-13T11:28:01,537 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:01,537 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33291:33291)] 2024-11-13T11:28:01,537 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 2024-11-13T11:28:01,538 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497279462 is not closed yet, will try archiving it next time 2024-11-13T11:28:01,538 WARN [IPC Server handler 4 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:01,538 WARN [IPC Server handler 4 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:01,538 WARN [IPC Server handler 4 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:01,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741852_1035 (size=10347) 2024-11-13T11:28:01,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/b76699813bd0477da95d452759d8a8b2 2024-11-13T11:28:01,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/b76699813bd0477da95d452759d8a8b2 as 
hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/b76699813bd0477da95d452759d8a8b2 2024-11-13T11:28:01,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/b76699813bd0477da95d452759d8a8b2, entries=5, sequenceid=11, filesize=10.1 K 2024-11-13T11:28:01,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=7.35 KB/7531 for c670657098f880144481eec91e517476 in 71ms, sequenceid=11, compaction requested=false 2024-11-13T11:28:01,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34477 {}] regionserver.HRegion(8855): Flush requested on c670657098f880144481eec91e517476 2024-11-13T11:28:01,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c670657098f880144481eec91e517476 1/1 column families, dataSize=8.40 KB heapSize=9.25 KB 2024-11-13T11:28:01,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/f48e45fad45a419281d11fa2a8c5a2b4 is 1080, key is row0007/info:/1731497281502/Put/seqid=0 2024-11-13T11:28:01,736 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33705 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47960 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741853_1036 to mirror 127.0.0.1:33705 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:01,736 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:01,736 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47960 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:01,736 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741853_1036 2024-11-13T11:28:01,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47960 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47960 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:01,737 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:01,739 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,739 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47966 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741854_1037 to mirror 127.0.0.1:45561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:01,739 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 
2024-11-13T11:28:01,739 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47966 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:01,739 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741854_1037 2024-11-13T11:28:01,739 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47966 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47966 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:01,740 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:01,742 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,742 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 
2024-11-13T11:28:01,742 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741855_1038 2024-11-13T11:28:01,743 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:01,749 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45033 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:01,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47970 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741856_1039 to mirror 127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:01,749 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 
2024-11-13T11:28:01,749 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741856_1039 2024-11-13T11:28:01,749 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47970 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:01,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47970 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47970 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
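Every createBlockOutputStream failure in the traces above is the same java.net.ConnectException: Connection refused, raised while the writing client (or the first datanode acting as mirror) connects to a datanode address that no longer has anything listening on it. A self-contained sketch of that failure mode; the port is copied from the log purely for illustration, and any port with no listener behaves identically:

    import java.io.IOException;
    import java.net.ConnectException;
    import java.net.InetSocketAddress;
    import java.nio.channels.SocketChannel;

    public class ConnectRefusedDemo {
        public static void main(String[] args) {
            InetSocketAddress deadDatanode = new InetSocketAddress("127.0.0.1", 33705);
            try (SocketChannel ch = SocketChannel.open()) {
                ch.connect(deadDatanode); // blocking connect; NetUtils.connect in the traces fails at this point
            } catch (ConnectException e) {
                // Same exception DataStreamer logs before abandoning the block and
                // excluding the datanode from the pipeline.
                System.out.println("connect refused: " + e.getMessage());
            } catch (IOException e) {
                System.out.println("other I/O failure: " + e.getMessage());
            }
        }
    }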
2024-11-13T11:28:01,753 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:01,754 WARN [IPC Server handler 0 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:01,755 WARN [IPC Server handler 0 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:01,755 WARN [IPC Server handler 0 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:01,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741857_1040 (size=12506) 2024-11-13T11:28:01,847 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:01,930 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 2024-11-13T11:28:01,931 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497275453 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs/7bf281cf3991%2C34477%2C1731497263757.1731497275453 2024-11-13T11:28:02,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.40 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/f48e45fad45a419281d11fa2a8c5a2b4 2024-11-13T11:28:02,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/f48e45fad45a419281d11fa2a8c5a2b4 as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4 2024-11-13T11:28:02,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4, entries=7, sequenceid=22, filesize=12.2 K 2024-11-13T11:28:02,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.40 KB/8606, heapSize ~9.23 KB/9456, currentSize=2.10 KB/2150 for c670657098f880144481eec91e517476 in 452ms, sequenceid=22, compaction requested=false 2024-11-13T11:28:02,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:02,178 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-13T11:28:02,178 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:02,178 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4 because midkey is the same as first or last row 2024-11-13T11:28:02,790 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34477 {}] regionserver.HRegion(8855): Flush requested on c670657098f880144481eec91e517476 2024-11-13T11:28:03,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c670657098f880144481eec91e517476 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T11:28:03,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/6c9f93e7be2b4428b441744cae4c032a is 1079, key is tmprow/info:/1731497283147/Put/seqid=0 2024-11-13T11:28:03,155 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,155 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:03,155 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741858_1041 2024-11-13T11:28:03,156 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:03,157 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,157 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 2024-11-13T11:28:03,157 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741859_1042 2024-11-13T11:28:03,158 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:03,159 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,159 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:28:03,159 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741860_1043 2024-11-13T11:28:03,159 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:03,162 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45033 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,162 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47998 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741861_1044 to mirror 127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:03,162 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:03,162 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741861_1044 2024-11-13T11:28:03,162 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47998 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:03,162 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:47998 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47998 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:03,163 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:03,164 WARN [IPC Server handler 0 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:03,164 WARN [IPC Server handler 0 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:03,164 WARN [IPC Server handler 0 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:03,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741862_1045 (size=6027) 2024-11-13T11:28:03,538 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
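The recurring "All datanodes [...] are bad. Aborting..." error above is pipeline recovery running out of usable datanodes: every node in the stream's pipeline has been marked bad and no replacement could be obtained. The HDFS client exposes standard settings governing datanode replacement on write failure; the snippet below only illustrates where those knobs live and makes no claim that tuning them would change this test, which stops datanodes deliberately:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryTuning {
        public static Configuration lenientClientConf() {
            Configuration conf = new Configuration();
            // Standard HDFS client keys for datanode replacement on write failure.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // best-effort lets the stream keep writing even when no replacement is found.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }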
2024-11-13T11:28:03,538 WARN [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]] 2024-11-13T11:28:03,538 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34477%2C1731497263757:(num 1731497281490) roll requested 2024-11-13T11:28:03,539 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.1731497283538 2024-11-13T11:28:03,542 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,542 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 2024-11-13T11:28:03,542 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741863_1046 2024-11-13T11:28:03,543 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:03,545 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
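The wal.FSHLog(529) warning at 11:28:03,538 shows the roll trigger itself: the WAL's current pipeline has shrunk to a single datanode while at least 2 replicas are expected, so a roll of the log file is requested. A toy sketch of that decision; the method and parameter names are illustrative, not the actual FSHLog code:

    public class LowReplicationCheck {
        // pipelineSize: datanodes still in the WAL's write pipeline.
        // minTolerable: replica count below which a new WAL file is requested.
        static boolean shouldRequestRoll(int pipelineSize, int minTolerable) {
            return pipelineSize < minTolerable;
        }

        public static void main(String[] args) {
            System.out.println(shouldRequestRoll(1, 2)); // true -> "Requesting close of WAL"
        }
    }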
2024-11-13T11:28:03,545 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:03,545 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741864_1047 2024-11-13T11:28:03,546 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:03,547 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,547 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:03,547 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741865_1048 2024-11-13T11:28:03,548 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:03,551 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:03,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:48024 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741866_1049 to mirror 127.0.0.1:45561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:03,551 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:28:03,551 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741866_1049 2024-11-13T11:28:03,551 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:48024 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T11:28:03,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:48024 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48024 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:03,552 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:03,553 WARN [IPC Server handler 1 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:03,553 WARN [IPC Server handler 1 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:03,553 WARN [IPC Server handler 1 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:03,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:03,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:03,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:03,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:03,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:03,558 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497281490 with entries=14, filesize=12.82 KB; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497283538 2024-11-13T11:28:03,559 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33291:33291)] 2024-11-13T11:28:03,559 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 
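The split-policy figures in the flush records are simply the running sum of the flushed HFile sizes for this region. Re-adding the sizes reported by the "Added ... filesize=" lines (in KB, including the 5.9 K file reported in the records that follow) reproduces both sumSize values:

    public class StoreSizeCheck {
        public static void main(String[] args) {
            double b766 = 10.1; // info/b76699813bd0477da95d452759d8a8b2 (seqid=11)
            double f48e = 12.2; // info/f48e45fad45a419281d11fa2a8c5a2b4 (seqid=22)
            double c9f9 = 5.9;  // info/6c9f93e7be2b4428b441744cae4c032a (seqid=32, below)
            System.out.printf("%.1f%n", b766 + f48e);        // 22.3 K vs sizeToCheck=16.0 K -> "Should split"
            System.out.printf("%.1f%n", b766 + f48e + c9f9); // 28.2 K, the sumSize checked after the next flush
        }
    }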
2024-11-13T11:28:03,559 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497281490 is not closed yet, will try archiving it next time 2024-11-13T11:28:03,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741847_1030 (size=13133) 2024-11-13T11:28:03,559 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497279462 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs/7bf281cf3991%2C34477%2C1731497263757.1731497279462 2024-11-13T11:28:03,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/6c9f93e7be2b4428b441744cae4c032a 2024-11-13T11:28:03,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/6c9f93e7be2b4428b441744cae4c032a as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/6c9f93e7be2b4428b441744cae4c032a 2024-11-13T11:28:03,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/6c9f93e7be2b4428b441744cae4c032a, entries=1, sequenceid=32, filesize=5.9 K 2024-11-13T11:28:03,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for c670657098f880144481eec91e517476 in 440ms, sequenceid=32, compaction requested=true 2024-11-13T11:28:03,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:03,588 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-13T11:28:03,588 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:03,588 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4 because midkey is the same as first or last row 2024-11-13T11:28:03,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c670657098f880144481eec91e517476:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:28:03,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:28:03,588 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:28:03,590 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:28:03,590 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HStore(1541): c670657098f880144481eec91e517476/info is initiating minor compaction (all files) 2024-11-13T11:28:03,590 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c670657098f880144481eec91e517476/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:28:03,590 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/b76699813bd0477da95d452759d8a8b2, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/6c9f93e7be2b4428b441744cae4c032a] into tmpdir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp, totalSize=28.2 K 2024-11-13T11:28:03,591 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.Compactor(225): Compacting b76699813bd0477da95d452759d8a8b2, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731497277474 2024-11-13T11:28:03,591 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.Compactor(225): Compacting f48e45fad45a419281d11fa2a8c5a2b4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=22, earliestPutTs=1731497281502 2024-11-13T11:28:03,592 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c9f93e7be2b4428b441744cae4c032a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731497283147 2024-11-13T11:28:03,607 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c670657098f880144481eec91e517476#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:28:03,607 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/abfde15e2b5744bf87fef3f537eaaf9b is 1080, key is row0002/info:/1731497277474/Put/seqid=0 2024-11-13T11:28:03,609 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,609 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 2024-11-13T11:28:03,609 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741868_1051 2024-11-13T11:28:03,610 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:03,611 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:03,612 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:28:03,612 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741869_1052 2024-11-13T11:28:03,612 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:03,614 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,614 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:03,614 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741870_1053 2024-11-13T11:28:03,615 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:03,616 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:03,616 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:03,617 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741871_1054 2024-11-13T11:28:03,617 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:03,618 WARN [IPC Server handler 2 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:03,618 WARN [IPC Server handler 2 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:03,618 WARN [IPC Server handler 2 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:03,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741872_1055 (size=17994) 2024-11-13T11:28:03,848 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
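The DataStreamer entries above repeatedly abandon the new block and exclude the unreachable datanode until the namenode can no longer place the required 2 replicas. A hedged sketch of the HDFS client settings that govern this pipeline-recovery behaviour follows; the dfs.client.block.write.replace-datanode-on-failure.* keys are standard HDFS client properties, but the values shown are illustrative assumptions for a small test cluster, not the configuration this test actually uses.

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailureTolerance {
        // Builds a client Configuration that tolerates pipeline shrinkage when few
        // datanodes are alive. Values are assumptions, not taken from this log.
        public static Configuration relaxedClientConf() {
            Configuration conf = new Configuration();
            conf.setInt("dfs.replication", 2);
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // Keep writing on the surviving pipeline instead of aborting when no
            // replacement datanode can be found.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }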
2024-11-13T11:28:03,961 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 is not closed yet, will try archiving it next time 2024-11-13T11:28:04,033 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/abfde15e2b5744bf87fef3f537eaaf9b as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b 2024-11-13T11:28:04,046 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c670657098f880144481eec91e517476/info of c670657098f880144481eec91e517476 into abfde15e2b5744bf87fef3f537eaaf9b(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T11:28:04,046 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:04,046 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476., storeName=c670657098f880144481eec91e517476/info, priority=13, startTime=1731497283588; duration=0sec 2024-11-13T11:28:04,046 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T11:28:04,046 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:04,046 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b because midkey is the same as first or last row 2024-11-13T11:28:04,046 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T11:28:04,046 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:04,047 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b because midkey is the same as first or last row 2024-11-13T11:28:04,047 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T11:28:04,047 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:04,047 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b because midkey is the same as first or last row 2024-11-13T11:28:04,047 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:28:04,047 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c670657098f880144481eec91e517476:info 2024-11-13T11:28:04,406 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@71b27da7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741852_1035 to 127.0.0.1:32897 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:04,406 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@667d3a82[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741842_1025 to 127.0.0.1:33705 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
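The split-policy lines above reduce to a simple size check: the store becomes eligible to split once its total file size exceeds the configured check size (sumSize=17.6 K vs sizeToCheck=16.0 K), but the split is vetoed when the midkey equals the first or last row. A minimal sketch of that decision, with hypothetical names standing in for the real HBase split-policy classes:

    public class SplitDecisionSketch {
        /** Mirrors the log above: size says "should split", midkey says "cannot split". */
        static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                                   byte[] midKey, byte[] firstKey, byte[] lastKey) {
            boolean bigEnough = sumSizeBytes > sizeToCheckBytes;            // 17.6 K > 16.0 K
            boolean midKeyUsable = !java.util.Arrays.equals(midKey, firstKey)
                                && !java.util.Arrays.equals(midKey, lastKey);
            return bigEnough && midKeyUsable;
        }

        public static void main(String[] args) {
            byte[] row = "row0002".getBytes();
            // With one dominant row the midkey collapses onto a boundary row, so the
            // region is big enough yet still cannot be split, as logged above.
            System.out.println(shouldSplit(18_022, 16_384, row, row, "row0099".getBytes()));
        }
    }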
2024-11-13T11:28:04,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34477 {}] regionserver.HRegion(8855): Flush requested on c670657098f880144481eec91e517476 2024-11-13T11:28:04,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c670657098f880144481eec91e517476 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T11:28:04,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/4f166267146845598c25a484cd5b2a8c is 1079, key is tmprow/info:/1731497284569/Put/seqid=0 2024-11-13T11:28:04,579 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:04,579 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK], DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK]) is bad. 2024-11-13T11:28:04,579 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741873_1056 2024-11-13T11:28:04,580 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32897,DS-830be915-8237-433c-9353-ddc0a0f05e0e,DISK] 2024-11-13T11:28:04,582 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:04,582 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]) is bad. 2024-11-13T11:28:04,582 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741874_1057 2024-11-13T11:28:04,583 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33705,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK] 2024-11-13T11:28:04,586 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:04,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:48060 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741875_1058 to mirror 127.0.0.1:45561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:04,586 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]) is bad. 2024-11-13T11:28:04,586 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741875_1058 2024-11-13T11:28:04,586 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:48060 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:04,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:48060 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48060 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:04,587 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK] 2024-11-13T11:28:04,589 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:04,589 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:04,589 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741876_1059 2024-11-13T11:28:04,590 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:04,590 WARN [IPC Server handler 3 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T11:28:04,591 WARN [IPC Server handler 3 on default port 35763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T11:28:04,591 WARN [IPC Server handler 3 on default port 35763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T11:28:04,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741877_1060 (size=6027) 2024-11-13T11:28:04,790 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
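Each flush in this log (above, and again immediately below) writes the new HFile under .tmp/info/ and then commits it by renaming it into info/ (the HRegionFileSystem "Committing ... as ..." lines). A local-filesystem analogy of that write-then-rename commit using java.nio; the paths are hypothetical and the real code performs the rename on HDFS:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class CommitByRenameSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("store");
            Path staged = storeDir.resolve(".tmp-6c9f93e7");   // staged flush output
            Path committed = storeDir.resolve("6c9f93e7");     // final store file name
            Files.write(staged, new byte[]{1, 2, 3});          // "flush" the data
            // Publish the finished file in one step (atomic on the same filesystem).
            Files.move(staged, committed, StandardCopyOption.ATOMIC_MOVE);
            System.out.println("committed " + committed);
        }
    }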
2024-11-13T11:28:04,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/4f166267146845598c25a484cd5b2a8c 2024-11-13T11:28:05,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/4f166267146845598c25a484cd5b2a8c as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/4f166267146845598c25a484cd5b2a8c 2024-11-13T11:28:05,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/4f166267146845598c25a484cd5b2a8c, entries=1, sequenceid=43, filesize=5.9 K 2024-11-13T11:28:05,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for c670657098f880144481eec91e517476 in 444ms, sequenceid=43, compaction requested=false 2024-11-13T11:28:05,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:05,014 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-13T11:28:05,014 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:05,014 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b because midkey is the same as first or last row 2024-11-13T11:28:05,392 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@667d3a82[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741857_1040 to 127.0.0.1:45561 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:05,392 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@71b27da7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741862_1045 to 127.0.0.1:32897 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:05,560 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:05,560 WARN [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-13T11:28:05,595 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:05,599 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:05,600 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:05,600 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:05,600 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:28:05,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61a456cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:05,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1eb85add{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:05,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6729e823{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/java.io.tmpdir/jetty-localhost-40683-hadoop-hdfs-3_4_1-tests_jar-_-any-3943810639142223748/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:05,710 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a5f90bf{HTTP/1.1, (http/1.1)}{localhost:40683} 2024-11-13T11:28:05,710 INFO [Time-limited test {}] server.Server(415): Started @127871ms 2024-11-13T11:28:05,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:28:05,797 WARN [Thread-974 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:28:05,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cf99dd1c09a5a01 with lease ID 0xefc7ddcf82d4f9eb: from storage DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c node DatanodeRegistration(127.0.0.1:36637, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=34603, infoSecurePort=0, ipcPort=45327, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:05,805 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cf99dd1c09a5a01 with lease ID 0xefc7ddcf82d4f9eb: from storage DS-a20d519f-2d5b-4f11-a5c5-b57e53a8b023 node DatanodeRegistration(127.0.0.1:36637, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=34603, infoSecurePort=0, ipcPort=45327, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:05,848 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:06,790 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:07,392 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@667d3a82[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741872_1055 to 127.0.0.1:45033 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:07,392 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@71b27da7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741847_1030 to 127.0.0.1:32897 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:07,560 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:07,848 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:08,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741877_1060 (size=6027) 2024-11-13T11:28:08,791 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:09,560 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:09,849 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:10,791 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
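The log roller above re-attempts the roll roughly every one to two seconds and keeps hitting the same "All datanodes ... are bad" IOException until a replacement datanode registers. A generic sketch of that bounded retry-with-delay pattern; the WalRoller interface, attempt count, and interval are assumptions for illustration, not values read from FSHLog:

    import java.io.IOException;

    public class RollRetrySketch {
        interface WalRoller { void rollWriter() throws IOException; }

        static void rollWithRetry(WalRoller roller, int maxAttempts, long delayMillis)
                throws IOException, InterruptedException {
            IOException last = null;
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    roller.rollWriter();       // succeeds once a healthy pipeline exists
                    return;
                } catch (IOException e) {
                    last = e;                  // e.g. "All datanodes ... are bad. Aborting..."
                    Thread.sleep(delayMillis); // wait for a datanode to come back
                }
            }
            throw last;
        }
    }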
2024-11-13T11:28:11,561 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:11,849 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:12,792 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:13,561 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:13,680 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
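The FsDatasetAsyncDiskServiceFixer note above comes from a reflective lookup of a private field that newer Hadoop releases no longer have (see HBASE-27595), which the test utility treats as non-fatal. A minimal sketch of that defensive reflection pattern; the target object and field name are placeholders, not the actual Hadoop internals:

    public class ReflectiveFieldProbe {
        /** Returns the value of a private field, or null if this version no longer has it. */
        static Object readFieldIfPresent(Object target, String fieldName) {
            try {
                java.lang.reflect.Field f = target.getClass().getDeclaredField(fieldName);
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException e) {
                // Same situation as the log above: the field disappeared in a newer release.
                return null;
            } catch (IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }
    }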
2024-11-13T11:28:13,850 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:13,955 ERROR [FSHLog-0-hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData-prefix:7bf281cf3991,37577,1731497263703 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:13,955 WARN [FSHLog-0-hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData-prefix:7bf281cf3991,37577,1731497263703 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:13,956 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C37577%2C1731497263703:(num 1731497263874) roll requested 2024-11-13T11:28:13,957 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C37577%2C1731497263703.1731497293956 2024-11-13T11:28:13,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:13,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:13,969 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:13,969 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:13,969 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:13,969 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497293956 2024-11-13T11:28:13,970 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:13,970 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:13,970 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 2024-11-13T11:28:13,970 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34603:34603),(127.0.0.1/127.0.0.1:33291:33291)] 2024-11-13T11:28:13,970 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 is not closed yet, will try archiving it next time 2024-11-13T11:28:13,970 WARN [IPC Server handler 4 on default port 35763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 has not been closed. Lease recovery is in progress. RecoveryId = 1062 for block blk_1073741830_1006 2024-11-13T11:28:13,971 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 after 1ms 2024-11-13T11:28:14,792 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:15,562 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:15,820 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2c34512d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:45561,null,null]) java.net.ConnectException: Call From 7bf281cf3991/172.17.0.2 to localhost:37897 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T11:28:15,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741833_1019 (size=455) 2024-11-13T11:28:16,496 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497264182 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs/7bf281cf3991%2C34477%2C1731497263757.1731497264182 2024-11-13T11:28:16,498 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497281490 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs/7bf281cf3991%2C34477%2C1731497263757.1731497281490 2024-11-13T11:28:16,793 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741833_1019 (size=455) 2024-11-13T11:28:17,562 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:17,972 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/WALs/7bf281cf3991,37577,1731497263703/7bf281cf3991%2C37577%2C1731497263703.1731497263874 after 4002ms 2024-11-13T11:28:18,793 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:19,563 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:19,805 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d523e5c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36637, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=34603, infoSecurePort=0, ipcPort=45327, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741836_1012 to 127.0.0.1:45033 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:19,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:28:20,794 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:20,804 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d523e5c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36637, datanodeUuid=d6fd65b9-3d25-4348-bf57-ab1173ebd30b, infoPort=34603, infoSecurePort=0, ipcPort=45327, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741826_1002 to 127.0.0.1:45033 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:20,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:28:21,206 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.1731497301206 2024-11-13T11:28:21,210 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,210 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741879_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:21,210 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741879_1063 2024-11-13T11:28:21,211 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,217 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,217 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,218 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,218 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,218 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497283538 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497301206 2024-11-13T11:28:21,219 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33291:33291),(127.0.0.1/127.0.0.1:34603:34603)] 2024-11-13T11:28:21,219 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497283538 is not closed yet, will try archiving it next time 2024-11-13T11:28:21,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741867_1050 (size=12100) 2024-11-13T11:28:21,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34477 {}] regionserver.HRegion(8855): Flush requested on c670657098f880144481eec91e517476 2024-11-13T11:28:21,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c670657098f880144481eec91e517476 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T11:28:21,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/419031969050495c85283909c309f5ca is 1080, key is row0013/info:/1731497301221/Put/seqid=0 2024-11-13T11:28:21,238 WARN [Thread-1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,238 WARN [Thread-1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:36637,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:21,238 WARN [Thread-1015 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741881_1065 2024-11-13T11:28:21,239 WARN [Thread-1015 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741882_1066 (size=9267) 2024-11-13T11:28:21,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741882_1066 (size=9267) 2024-11-13T11:28:21,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/419031969050495c85283909c309f5ca 2024-11-13T11:28:21,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/419031969050495c85283909c309f5ca as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/419031969050495c85283909c309f5ca 2024-11-13T11:28:21,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/419031969050495c85283909c309f5ca, entries=4, sequenceid=53, filesize=9.0 K 2024-11-13T11:28:21,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for c670657098f880144481eec91e517476 in 31ms, sequenceid=53, compaction requested=true 2024-11-13T11:28:21,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:21,259 DEBUG 
[MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=32.5 K, sizeToCheck=16.0 K 2024-11-13T11:28:21,259 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:21,259 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b because midkey is the same as first or last row 2024-11-13T11:28:21,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c670657098f880144481eec91e517476:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:28:21,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:28:21,259 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:28:21,260 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33288 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:28:21,260 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HStore(1541): c670657098f880144481eec91e517476/info is initiating minor compaction (all files) 2024-11-13T11:28:21,260 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c670657098f880144481eec91e517476/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 
2024-11-13T11:28:21,261 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/4f166267146845598c25a484cd5b2a8c, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/419031969050495c85283909c309f5ca] into tmpdir=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp, totalSize=32.5 K 2024-11-13T11:28:21,261 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.Compactor(225): Compacting abfde15e2b5744bf87fef3f537eaaf9b, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731497277474 2024-11-13T11:28:21,261 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f166267146845598c25a484cd5b2a8c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731497284569 2024-11-13T11:28:21,262 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] compactions.Compactor(225): Compacting 419031969050495c85283909c309f5ca, keycount=4, bloomtype=ROW, size=9.0 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1731497285382 2024-11-13T11:28:21,279 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c670657098f880144481eec91e517476#info#compaction#24 average throughput is 7.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:28:21,280 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/410c96ce079f471380f3a27c6645b2f8 is 1080, key is row0002/info:/1731497277474/Put/seqid=0 2024-11-13T11:28:21,282 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:21,282 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK], DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:21,282 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741883_1067 2024-11-13T11:28:21,283 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741884_1068 (size=21344) 2024-11-13T11:28:21,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741884_1068 (size=21344) 2024-11-13T11:28:21,294 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/410c96ce079f471380f3a27c6645b2f8 as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/410c96ce079f471380f3a27c6645b2f8 2024-11-13T11:28:21,302 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c670657098f880144481eec91e517476/info of c670657098f880144481eec91e517476 into 410c96ce079f471380f3a27c6645b2f8(size=20.8 K), total size for store is 20.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T11:28:21,302 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:21,303 INFO [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476., storeName=c670657098f880144481eec91e517476/info, priority=13, startTime=1731497301259; duration=0sec 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=20.8 K, sizeToCheck=16.0 K 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/410c96ce079f471380f3a27c6645b2f8 because midkey is the same as first or last row 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=20.8 K, sizeToCheck=16.0 K 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/410c96ce079f471380f3a27c6645b2f8 because midkey is the same as first or last row 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=20.8 K, sizeToCheck=16.0 K 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/410c96ce079f471380f3a27c6645b2f8 because midkey is the same as first or last row 2024-11-13T11:28:21,303 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:28:21,304 DEBUG [RS:0;7bf281cf3991:34477-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c670657098f880144481eec91e517476:info 2024-11-13T11:28:21,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34477 {}] regionserver.HRegion(8855): Flush requested on c670657098f880144481eec91e517476 2024-11-13T11:28:21,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c670657098f880144481eec91e517476 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-13T11:28:21,463 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/c8352f17e7e2498da48dd8b56b46b75b is 1080, key is row0016/info:/1731497301230/Put/seqid=0 2024-11-13T11:28:21,466 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45033 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:59610 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741885_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741885_1069 to mirror 127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:21,466 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 
2024-11-13T11:28:21,466 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741885_1069 2024-11-13T11:28:21,466 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:59610 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741885_1069] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:21,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:59610 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741885_1069] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59610 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:21,466 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741886_1070 (size=13583) 2024-11-13T11:28:21,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741886_1070 (size=13583) 2024-11-13T11:28:21,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/c8352f17e7e2498da48dd8b56b46b75b 2024-11-13T11:28:21,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/.tmp/info/c8352f17e7e2498da48dd8b56b46b75b as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/c8352f17e7e2498da48dd8b56b46b75b 2024-11-13T11:28:21,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/c8352f17e7e2498da48dd8b56b46b75b, entries=8, sequenceid=66, filesize=13.3 K 2024-11-13T11:28:21,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9682, heapSize ~10.36 KB/10608, currentSize=0 B/0 for c670657098f880144481eec91e517476 in 30ms, sequenceid=66, compaction requested=false 2024-11-13T11:28:21,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c670657098f880144481eec91e517476: 2024-11-13T11:28:21,486 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-13T11:28:21,486 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:28:21,486 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/410c96ce079f471380f3a27c6645b2f8 because midkey is the same as first or last row 2024-11-13T11:28:21,563 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,563 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-13T11:28:21,621 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.1731497283538 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs/7bf281cf3991%2C34477%2C1731497263757.1731497283538 2024-11-13T11:28:21,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T11:28:21,658 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T11:28:21,659 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:21,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:21,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:21,660 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T11:28:21,660 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T11:28:21,660 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=698256917, stopped=false 2024-11-13T11:28:21,661 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,37577,1731497263703 2024-11-13T11:28:21,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:21,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:21,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:21,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:21,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:21,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:21,664 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:28:21,664 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T11:28:21,664 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:21,665 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:21,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:21,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,34477,1731497263757' ***** 2024-11-13T11:28:21,665 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T11:28:21,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:21,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,38159,1731497264714' ***** 2024-11-13T11:28:21,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:21,665 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T11:28:21,665 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T11:28:21,666 INFO [RS:1;7bf281cf3991:38159 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T11:28:21,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T11:28:21,666 INFO [RS:1;7bf281cf3991:38159 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T11:28:21,666 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,38159,1731497264714 2024-11-13T11:28:21,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T11:28:21,666 INFO [RS:1;7bf281cf3991:38159 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T11:28:21,666 INFO [RS:1;7bf281cf3991:38159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7bf281cf3991:38159. 
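The "Connection has been closed" call stacks above all trace back through AbstractTestLogRolling.tearDown into HBaseTestingUtil.shutdownMiniCluster, i.e. the JUnit teardown tearing the single-process cluster down. As a rough, hedged illustration of that teardown path (the class and test names below are hypothetical; only the HBaseTestingUtil start/shutdown calls are taken from the stack trace above):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class ExampleLogRollingTest {
      // Hypothetical harness mirroring the utility seen in the stack traces above.
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts a mini HBase cluster backed by a mini DFS; the real test runs
        // several region servers (RS:0 and RS:1 in this log).
        util.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // This is the call visible in the stacks above:
        // shutdownMiniCluster() -> JVMClusterUtil.shutdown() -> HMaster.shutdown()
        // -> AsyncConnectionImpl.close(), which logs the "Call stack:" dumps.
        util.shutdownMiniCluster();
      }

      @Test
      public void testLogRolling() throws Exception {
        // test body omitted
      }
    }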
2024-11-13T11:28:21,666 DEBUG [RS:1;7bf281cf3991:38159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(3091): Received CLOSE for c670657098f880144481eec91e517476 2024-11-13T11:28:21,666 DEBUG [RS:1;7bf281cf3991:38159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:21,666 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,38159,1731497264714; all regions closed. 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,34477,1731497263757 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:28:21,666 INFO [RS:0;7bf281cf3991:34477 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:34477. 
2024-11-13T11:28:21,667 DEBUG [RS:0;7bf281cf3991:34477 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:21,667 DEBUG [RS:0;7bf281cf3991:34477 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:21,667 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c670657098f880144481eec91e517476, disabling compactions & flushes 2024-11-13T11:28:21,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,667 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:28:21,667 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:28:21,667 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:28:21,667 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:28:21,667 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T11:28:21,667 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. after waiting 0 ms 2024-11-13T11:28:21,667 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 
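The ZKUtil lines above ("Set watcher on znode that does not yet exist, /hbase/running") reflect the standard ZooKeeper pattern of calling exists() with a watcher, so the watch is registered even while the node is absent and fires when it is later created or deleted — which is how the region servers learned that the master deleted /hbase/running. A minimal sketch of that pattern with the plain ZooKeeper client (the connect string and session timeout are placeholders, not the values used by this test):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningNodeWatchExample {
      public static void main(String[] args) throws Exception {
        // Placeholder quorum address; the log above uses 127.0.0.1:59956.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

        Watcher watcher = (WatchedEvent event) ->
            // Fires for NodeCreated/NodeDeleted on the watched path, as in the log above.
            System.out.println("Event " + event.getType() + " on " + event.getPath());

        // exists() registers the watch even when the znode is absent; this is what
        // "Set watcher on znode that does not yet exist" refers to.
        Stat stat = zk.exists("/hbase/running", watcher);
        System.out.println("/hbase/running currently " + (stat == null ? "absent" : "present"));

        zk.close();
      }
    }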
2024-11-13T11:28:21,667 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:28:21,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,667 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T11:28:21,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,667 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1325): Online Regions={c670657098f880144481eec91e517476=TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T11:28:21,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,667 DEBUG [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c670657098f880144481eec91e517476 2024-11-13T11:28:21,667 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:28:21,668 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:28:21,668 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:28:21,668 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:28:21,668 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:28:21,668 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/b76699813bd0477da95d452759d8a8b2, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/6c9f93e7be2b4428b441744cae4c032a, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/4f166267146845598c25a484cd5b2a8c, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/419031969050495c85283909c309f5ca] to archive 2024-11-13T11:28:21,668 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, 
dataSize=1.71 KB heapSize=3.75 KB 2024-11-13T11:28:21,668 ERROR [FSHLog-0-hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718-prefix:7bf281cf3991,34477,1731497263757.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,668 WARN [FSHLog-0-hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718-prefix:7bf281cf3991,34477,1731497263757.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,668 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34477%2C1731497263757.meta:.meta(num 1731497264594) roll requested 2024-11-13T11:28:21,669 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,669 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34477%2C1731497263757.meta.1731497301669.meta 2024-11-13T11:28:21,669 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T11:28:21,669 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,669 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 2024-11-13T11:28:21,669 WARN [IPC Server handler 1 on default port 35763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 has not been closed. Lease recovery is in progress. RecoveryId = 1071 for block blk_1073741837_1013 2024-11-13T11:28:21,670 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 after 1ms 2024-11-13T11:28:21,671 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/b76699813bd0477da95d452759d8a8b2 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/b76699813bd0477da95d452759d8a8b2 2024-11-13T11:28:21,672 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45033 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
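The RecoverLeaseFSUtils messages here ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 1ms", and a later attempt=1 roughly four seconds afterwards) describe reclaiming the HDFS lease on a WAL file whose writer can no longer close it. A simplified sketch of such a retry loop using the public DistributedFileSystem.recoverLease call — the path, NameNode address, and the fixed backoff below are illustrative and are not the exact values or policy HBase uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryExample {
      // Repeatedly asks the NameNode to recover the lease until the file is closed.
      static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        int attempt = 0;
        while (true) {
          boolean recovered = dfs.recoverLease(wal); // true once the file has been closed
          if (recovered) {
            System.out.println("Lease recovered after attempt=" + attempt);
            return;
          }
          System.out.println("Failed to recover lease, attempt=" + attempt + ", retrying");
          attempt++;
          Thread.sleep(4000L); // illustrative pause; the log shows ~4s between attempts
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address and WAL path.
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("hdfs://localhost:8020/").getFileSystem(conf);
        recoverLease(dfs, new Path("/user/jenkins/example-wal"));
      }
    }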
2024-11-13T11:28:21,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:59630 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741887_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10]'}, localName='127.0.0.1:44657', datanodeUuid='c3f81e58-37b0-430e-8b46-34cc3ca39932', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741887_1072 to mirror 127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:21,672 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44657,DS-79dffad5-8135-4511-bcb2-8d51955d404d,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:21,672 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741887_1072 2024-11-13T11:28:21,672 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:59630 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741887_1072] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T11:28:21,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:59630 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741887_1072] {}] datanode.DataXceiver(331): 127.0.0.1:44657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59630 dst: /127.0.0.1:44657 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:21,672 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,673 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4 to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/f48e45fad45a419281d11fa2a8c5a2b4 2024-11-13T11:28:21,674 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/abfde15e2b5744bf87fef3f537eaaf9b 2024-11-13T11:28:21,675 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/6c9f93e7be2b4428b441744cae4c032a to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/6c9f93e7be2b4428b441744cae4c032a 2024-11-13T11:28:21,677 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/4f166267146845598c25a484cd5b2a8c to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/4f166267146845598c25a484cd5b2a8c 2024-11-13T11:28:21,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,678 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,679 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,679 INFO 
[sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,679 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/419031969050495c85283909c309f5ca to hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/info/419031969050495c85283909c309f5ca 2024-11-13T11:28:21,679 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,679 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497301669.meta 2024-11-13T11:28:21,679 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,679 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45561,DS-2138b76d-f810-4363-a506-30a8cc6ba641,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,679 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta 2024-11-13T11:28:21,679 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7bf281cf3991:37577 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-13T11:28:21,679 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b76699813bd0477da95d452759d8a8b2=10347, f48e45fad45a419281d11fa2a8c5a2b4=12506, abfde15e2b5744bf87fef3f537eaaf9b=17994, 6c9f93e7be2b4428b441744cae4c032a=6027, 4f166267146845598c25a484cd5b2a8c=6027, 419031969050495c85283909c309f5ca=9267] 2024-11-13T11:28:21,680 WARN [IPC Server handler 4 on default port 35763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741834_1010 2024-11-13T11:28:21,680 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta after 1ms 2024-11-13T11:28:21,680 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33291:33291),(127.0.0.1/127.0.0.1:34603:34603)] 2024-11-13T11:28:21,680 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta is not closed yet, will try archiving it next time 2024-11-13T11:28:21,689 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c670657098f880144481eec91e517476/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-13T11:28:21,690 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 2024-11-13T11:28:21,690 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c670657098f880144481eec91e517476: Waiting for close lock at 1731497301666Running coprocessor pre-close hooks at 1731497301666Disabling compacts and flushes for region at 1731497301666Disabling writes for close at 1731497301667 (+1 ms)Writing region close event to WAL at 1731497301680 (+13 ms)Running coprocessor post-close hooks at 1731497301689 (+9 ms)Closed at 1731497301690 (+1 ms) 2024-11-13T11:28:21,690 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476. 
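The HFileArchiver entries above do not delete compacted store files; they move each one from the table's data directory to the mirrored path under archive/. A bare-bones sketch of that move with the Hadoop FileSystem API — the paths are shortened placeholders, and the real archiver additionally handles name collisions, retries, and the quota reporting whose failure is logged above:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileExample {
      // Moves a store file into the archive tree, preserving its relative layout,
      // e.g. <root>/data/default/Table/region/info/file -> <root>/archive/data/default/Table/region/info/file
      static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length())
            .replaceFirst("^/", "");
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());
        if (!fs.rename(storeFile, archived)) {
          throw new IOException("Failed to archive " + storeFile);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path root = new Path("hdfs://localhost:8020/hbase"); // placeholder root dir
        FileSystem fs = root.getFileSystem(conf);
        archive(fs, root, new Path(root, "data/default/ExampleTable/region/info/examplefile"));
      }
    }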
2024-11-13T11:28:21,700 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/info/5238af8467474b1a8555a95c387fab02 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731497264820.c670657098f880144481eec91e517476./info:regioninfo/1731497265184/Put/seqid=0 2024-11-13T11:28:21,703 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45033 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:60040 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data4]'}, localName='127.0.0.1:36637', datanodeUuid='d6fd65b9-3d25-4348-bf57-ab1173ebd30b', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741889_1075 to mirror 127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:21,703 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36637,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:21,703 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741889_1075 2024-11-13T11:28:21,703 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:60040 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:21,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:60040 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:36637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60040 dst: /127.0.0.1:36637 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:21,704 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741890_1076 (size=7089) 2024-11-13T11:28:21,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741890_1076 (size=7089) 2024-11-13T11:28:21,709 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/info/5238af8467474b1a8555a95c387fab02 2024-11-13T11:28:21,727 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/ns/32ce9af898f54fe8b23a8aad5f85a832 is 43, key is default/ns:d/1731497264647/Put/seqid=0 2024-11-13T11:28:21,730 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45033 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:21,730 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:60062 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741891_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data4]'}, localName='127.0.0.1:36637', datanodeUuid='d6fd65b9-3d25-4348-bf57-ab1173ebd30b', xmitsInProgress=0}:Exception transferring block BP-1464557415-172.17.0.2-1731497263081:blk_1073741891_1077 to mirror 127.0.0.1:45033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:21,730 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1464557415-172.17.0.2-1731497263081:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36637,DS-507a33da-3ff0-49b0-be2b-8c6cc0d7241c,DISK], DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK]) is bad. 2024-11-13T11:28:21,730 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-1464557415-172.17.0.2-1731497263081:blk_1073741891_1077 2024-11-13T11:28:21,730 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:60062 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741891_1077] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T11:28:21,730 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2135855254_22 at /127.0.0.1:60062 [Receiving block BP-1464557415-172.17.0.2-1731497263081:blk_1073741891_1077] {}] datanode.DataXceiver(331): 127.0.0.1:36637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60062 dst: /127.0.0.1:36637 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
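The recurring "Exception in createBlockOutputStream ... firstBadLink" errors and "Excluding datanode" warnings in this shutdown sequence come from HDFS write-pipeline recovery after a datanode in the mini cluster was killed: the client abandons the block and retries with the bad node excluded. How aggressively the client tries to replace failed pipeline nodes is governed by the replace-datanode-on-failure settings; the sketch below only shows where those knobs live, with example values rather than a recommendation for this test:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailureConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Whether the client may replace a failed datanode in a write pipeline at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT / ALWAYS / NEVER: when to ask the NameNode for a replacement datanode.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        System.out.println(
            conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
      }
    }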
2024-11-13T11:28:21,730 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45033,DS-9c9e5795-cff5-4085-ab71-b0c6f828ca65,DISK] 2024-11-13T11:28:21,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741892_1078 (size=5153) 2024-11-13T11:28:21,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741892_1078 (size=5153) 2024-11-13T11:28:21,735 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/ns/32ce9af898f54fe8b23a8aad5f85a832 2024-11-13T11:28:21,755 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/table/1768ba1e6dca41ca80401b979448178a is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731497265195/Put/seqid=0 2024-11-13T11:28:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741893_1079 (size=5424) 2024-11-13T11:28:21,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741893_1079 (size=5424) 2024-11-13T11:28:21,760 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/table/1768ba1e6dca41ca80401b979448178a 2024-11-13T11:28:21,767 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/info/5238af8467474b1a8555a95c387fab02 as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/info/5238af8467474b1a8555a95c387fab02 2024-11-13T11:28:21,773 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/info/5238af8467474b1a8555a95c387fab02, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T11:28:21,774 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/ns/32ce9af898f54fe8b23a8aad5f85a832 as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/ns/32ce9af898f54fe8b23a8aad5f85a832 2024-11-13T11:28:21,779 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/ns/32ce9af898f54fe8b23a8aad5f85a832, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T11:28:21,780 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/.tmp/table/1768ba1e6dca41ca80401b979448178a as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/table/1768ba1e6dca41ca80401b979448178a 2024-11-13T11:28:21,785 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/table/1768ba1e6dca41ca80401b979448178a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T11:28:21,786 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-11-13T11:28:21,793 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T11:28:21,794 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:28:21,794 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:28:21,794 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497301667Running coprocessor pre-close hooks at 1731497301667Disabling compacts and flushes for region at 1731497301667Disabling writes for close at 1731497301668 (+1 ms)Obtaining lock to block concurrent updates at 1731497301668Preparing flush snapshotting stores in 1588230740 at 1731497301668Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731497301668Flushing stores of hbase:meta,,1.1588230740 at 1731497301681 (+13 ms)Flushing 1588230740/info: creating writer at 1731497301681Flushing 1588230740/info: appending metadata at 1731497301700 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731497301700Flushing 1588230740/ns: creating writer at 1731497301714 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731497301727 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731497301727Flushing 1588230740/table: creating writer at 1731497301741 (+14 ms)Flushing 1588230740/table: appending metadata at 1731497301754 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731497301754Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@538dec20: reopening flushed file at 1731497301766 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f300585: reopening flushed file at 1731497301773 (+7 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14fb29dc: reopening flushed file at 1731497301779 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false at 1731497301787 (+8 ms)Writing region close event to WAL at 1731497301789 (+2 ms)Running coprocessor post-close hooks at 1731497301794 (+5 ms)Closed at 1731497301794 2024-11-13T11:28:21,794 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T11:28:21,817 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T11:28:21,817 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T11:28:21,868 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,34477,1731497263757; all regions closed. 2024-11-13T11:28:21,868 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,868 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,868 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,868 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:21,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741888_1073 (size=825) 2024-11-13T11:28:21,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741888_1073 (size=825) 2024-11-13T11:28:22,044 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:28:22,083 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T11:28:22,083 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T11:28:22,394 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@71b27da7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44657, datanodeUuid=c3f81e58-37b0-430e-8b46-34cc3ca39932, infoPort=33291, infoSecurePort=0, ipcPort=41597, storageInfo=lv=-57;cid=testClusterID;nsid=621816180;c=1731497263081):Failed to transfer BP-1464557415-172.17.0.2-1731497263081:blk_1073741867_1050 to 127.0.0.1:45033 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:22,789 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:28:22,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:28:22,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:28:23,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:28:23,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T11:28:23,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:28:23,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:28:25,066 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T11:28:25,067 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T11:28:25,671 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 after 4002ms 2024-11-13T11:28:25,681 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta after 4002ms 2024-11-13T11:28:25,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741835_1011 (size=393) 2024-11-13T11:28:25,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:28:25,824 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3aa413f5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45561,null,null]) java.net.ConnectException: Call From 7bf281cf3991/172.17.0.2 to localhost:37897 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-13T11:28:26,669 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T11:28:26,676 DEBUG [RS:1;7bf281cf3991:38159 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs 2024-11-13T11:28:26,676 INFO [RS:1;7bf281cf3991:38159 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C38159%2C1731497264714:(num 1731497264924) 2024-11-13T11:28:26,676 DEBUG [RS:1;7bf281cf3991:38159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:26,676 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:28:26,677 INFO [RS:1;7bf281cf3991:38159 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:28:26,677 INFO [RS:1;7bf281cf3991:38159 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T11:28:26,678 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:28:26,678 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:28:26,678 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:28:26,678 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T11:28:26,678 INFO [RS:1;7bf281cf3991:38159 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:28:26,678 INFO [RS:1;7bf281cf3991:38159 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38159 2024-11-13T11:28:26,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,38159,1731497264714 2024-11-13T11:28:26,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:28:26,681 INFO [RS:1;7bf281cf3991:38159 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:28:26,681 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,38159,1731497264714] 2024-11-13T11:28:26,682 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,38159,1731497264714 already deleted, retry=false 2024-11-13T11:28:26,682 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,38159,1731497264714 expired; onlineServers=1 2024-11-13T11:28:26,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:26,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:26,782 INFO [RS:1;7bf281cf3991:38159 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:28:26,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38159-0x10038d6deb20002, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:26,782 INFO [RS:1;7bf281cf3991:38159 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,38159,1731497264714; zookeeper connection closed. 
2024-11-13T11:28:26,783 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6029d44f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6029d44f 2024-11-13T11:28:26,869 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T11:28:26,875 DEBUG [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs 2024-11-13T11:28:26,875 INFO [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C34477%2C1731497263757.meta:.meta(num 1731497301669) 2024-11-13T11:28:26,876 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:26,877 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:26,877 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:26,877 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:26,877 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:26,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741880_1064 (size=16308) 2024-11-13T11:28:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741880_1064 (size=16308) 2024-11-13T11:28:26,884 DEBUG [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/oldWALs 2024-11-13T11:28:26,884 INFO [RS:0;7bf281cf3991:34477 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C34477%2C1731497263757:(num 1731497301206) 2024-11-13T11:28:26,884 DEBUG [RS:0;7bf281cf3991:34477 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:26,884 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:28:26,884 INFO [RS:0;7bf281cf3991:34477 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:28:26,885 INFO [RS:0;7bf281cf3991:34477 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T11:28:26,885 INFO [RS:0;7bf281cf3991:34477 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:28:26,885 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T11:28:26,885 INFO [RS:0;7bf281cf3991:34477 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34477 2024-11-13T11:28:26,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,34477,1731497263757 2024-11-13T11:28:26,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:28:26,887 INFO [RS:0;7bf281cf3991:34477 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:28:26,889 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,34477,1731497263757] 2024-11-13T11:28:26,890 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,34477,1731497263757 already deleted, retry=false 2024-11-13T11:28:26,890 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,34477,1731497263757 expired; onlineServers=0 2024-11-13T11:28:26,890 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,37577,1731497263703' ***** 2024-11-13T11:28:26,890 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T11:28:26,890 INFO [M:0;7bf281cf3991:37577 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:28:26,890 INFO [M:0;7bf281cf3991:37577 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:28:26,890 DEBUG [M:0;7bf281cf3991:37577 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T11:28:26,891 DEBUG [M:0;7bf281cf3991:37577 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:28:26,891 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497263957 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497263957,5,FailOnTimeoutGroup] 2024-11-13T11:28:26,891 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497263961 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497263961,5,FailOnTimeoutGroup] 2024-11-13T11:28:26,891 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T11:28:26,891 INFO [M:0;7bf281cf3991:37577 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:28:26,891 INFO [M:0;7bf281cf3991:37577 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:28:26,891 DEBUG [M:0;7bf281cf3991:37577 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:28:26,891 INFO [M:0;7bf281cf3991:37577 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:28:26,891 INFO [M:0;7bf281cf3991:37577 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:28:26,891 INFO [M:0;7bf281cf3991:37577 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:28:26,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:28:26,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:26,892 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T11:28:26,892 DEBUG [M:0;7bf281cf3991:37577 {}] zookeeper.ZKUtil(347): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T11:28:26,892 WARN [M:0;7bf281cf3991:37577 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T11:28:26,893 INFO [M:0;7bf281cf3991:37577 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/.lastflushedseqids 2024-11-13T11:28:26,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741894_1080 (size=130) 2024-11-13T11:28:26,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741894_1080 (size=130) 2024-11-13T11:28:26,903 INFO [M:0;7bf281cf3991:37577 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:28:26,904 INFO [M:0;7bf281cf3991:37577 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:28:26,904 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:28:26,904 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:26,904 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:26,904 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-13T11:28:26,904 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:26,904 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-13T11:28:26,921 DEBUG [M:0;7bf281cf3991:37577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8ce1705115c44677adcb2326b424000c is 82, key is hbase:meta,,1/info:regioninfo/1731497264631/Put/seqid=0 2024-11-13T11:28:26,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741895_1081 (size=5672) 2024-11-13T11:28:26,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741895_1081 (size=5672) 2024-11-13T11:28:26,926 INFO [M:0;7bf281cf3991:37577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8ce1705115c44677adcb2326b424000c 2024-11-13T11:28:26,946 DEBUG [M:0;7bf281cf3991:37577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/84e24c9e3717462ba15f3466cb15fb0b is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731497265200/Put/seqid=0 2024-11-13T11:28:26,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741896_1082 (size=6255) 2024-11-13T11:28:26,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741896_1082 (size=6255) 2024-11-13T11:28:26,951 INFO [M:0;7bf281cf3991:37577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/84e24c9e3717462ba15f3466cb15fb0b 2024-11-13T11:28:26,956 INFO [M:0;7bf281cf3991:37577 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84e24c9e3717462ba15f3466cb15fb0b 2024-11-13T11:28:26,969 DEBUG [M:0;7bf281cf3991:37577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/16453979094e4e758111b08bb5afb66c is 69, key is 7bf281cf3991,34477,1731497263757/rs:state/1731497264008/Put/seqid=0 2024-11-13T11:28:26,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741897_1083 (size=5224) 2024-11-13T11:28:26,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741897_1083 (size=5224) 2024-11-13T11:28:26,974 INFO [M:0;7bf281cf3991:37577 {}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/16453979094e4e758111b08bb5afb66c 2024-11-13T11:28:26,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:26,989 INFO [RS:0;7bf281cf3991:34477 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:28:26,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34477-0x10038d6deb20001, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:26,989 INFO [RS:0;7bf281cf3991:34477 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,34477,1731497263757; zookeeper connection closed. 2024-11-13T11:28:26,989 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6338c750 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6338c750 2024-11-13T11:28:26,990 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-13T11:28:26,992 DEBUG [M:0;7bf281cf3991:37577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc00304f163e43caa1d7dcf66fc87bd3 is 52, key is load_balancer_on/state:d/1731497264695/Put/seqid=0 2024-11-13T11:28:26,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741898_1084 (size=5056) 2024-11-13T11:28:26,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741898_1084 (size=5056) 2024-11-13T11:28:26,998 INFO [M:0;7bf281cf3991:37577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc00304f163e43caa1d7dcf66fc87bd3 2024-11-13T11:28:27,003 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8ce1705115c44677adcb2326b424000c as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8ce1705115c44677adcb2326b424000c 2024-11-13T11:28:27,007 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8ce1705115c44677adcb2326b424000c, entries=8, sequenceid=60, filesize=5.5 K 2024-11-13T11:28:27,008 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/84e24c9e3717462ba15f3466cb15fb0b as 
hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/84e24c9e3717462ba15f3466cb15fb0b 2024-11-13T11:28:27,013 INFO [M:0;7bf281cf3991:37577 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84e24c9e3717462ba15f3466cb15fb0b 2024-11-13T11:28:27,013 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/84e24c9e3717462ba15f3466cb15fb0b, entries=6, sequenceid=60, filesize=6.1 K 2024-11-13T11:28:27,014 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/16453979094e4e758111b08bb5afb66c as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/16453979094e4e758111b08bb5afb66c 2024-11-13T11:28:27,019 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/16453979094e4e758111b08bb5afb66c, entries=2, sequenceid=60, filesize=5.1 K 2024-11-13T11:28:27,020 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc00304f163e43caa1d7dcf66fc87bd3 as hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cc00304f163e43caa1d7dcf66fc87bd3 2024-11-13T11:28:27,025 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cc00304f163e43caa1d7dcf66fc87bd3, entries=1, sequenceid=60, filesize=4.9 K 2024-11-13T11:28:27,026 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=60, compaction requested=false 2024-11-13T11:28:27,027 INFO [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:27,027 DEBUG [M:0;7bf281cf3991:37577 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497306904Disabling compacts and flushes for region at 1731497306904Disabling writes for close at 1731497306904Obtaining lock to block concurrent updates at 1731497306904Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497306904Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731497306904Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731497306905 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497306905Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497306920 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497306920Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497306932 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497306945 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497306945Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497306956 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497306968 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497306969 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497306979 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497306992 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497306992Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@592b6f44: reopening flushed file at 1731497307002 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@211c1957: reopening flushed file at 1731497307008 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c61c185: reopening flushed file at 1731497307013 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16071907: reopening flushed file at 1731497307019 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=60, compaction requested=false at 1731497307026 (+7 ms)Writing region close event to WAL at 1731497307027 (+1 ms)Closed at 1731497307027 2024-11-13T11:28:27,028 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:27,028 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:27,028 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:27,028 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:27,028 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:27,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741878_1061 (size=1045) 2024-11-13T11:28:27,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36637 is added to blk_1073741878_1061 (size=1045) 2024-11-13T11:28:27,030 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:28:27,030 INFO [M:0;7bf281cf3991:37577 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-13T11:28:27,030 INFO [M:0;7bf281cf3991:37577 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37577 2024-11-13T11:28:27,031 INFO [M:0;7bf281cf3991:37577 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:28:27,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:27,132 INFO [M:0;7bf281cf3991:37577 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:28:27,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37577-0x10038d6deb20000, quorum=127.0.0.1:59956, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:27,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6729e823{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:27,139 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a5f90bf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:27,139 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:27,140 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1eb85add{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:27,140 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61a456cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:27,143 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:28:27,143 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:27,143 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:27,143 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1464557415-172.17.0.2-1731497263081 (Datanode Uuid d6fd65b9-3d25-4348-bf57-ab1173ebd30b) service to localhost/127.0.0.1:35763 2024-11-13T11:28:27,142 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c1bae62 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45561,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:37897 , LocalHost:localPort 7bf281cf3991/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T11:28:27,143 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c1bae62 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:36637,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1464557415-172.17.0.2-1731497263081 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:27,144 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c1bae62 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45561,null,null], DatanodeInfoWithStorage[127.0.0.1:36637,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45561,null,null], DatanodeInfoWithStorage[127.0.0.1:36637,null,null]] 2024-11-13T11:28:27,144 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c1bae62 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45561,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1464557415-172.17.0.2-1731497263081 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:27,144 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c1bae62 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36637,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1464557415-172.17.0.2-1731497263081 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:27,144 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data3/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:27,144 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c1bae62 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45561,null,null], DatanodeInfoWithStorage[127.0.0.1:36637,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1464557415-172.17.0.2-1731497263081:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45561,null,null], DatanodeInfoWithStorage[127.0.0.1:36637,null,null]] 2024-11-13T11:28:27,144 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data4/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:27,144 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:27,146 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c0171f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:27,146 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@602792b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:27,147 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:27,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45910ffe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:27,147 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@110505ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:27,148 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:27,148 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:28:27,148 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:27,148 WARN [BP-1464557415-172.17.0.2-1731497263081 heartbeating to localhost/127.0.0.1:35763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1464557415-172.17.0.2-1731497263081 (Datanode Uuid c3f81e58-37b0-430e-8b46-34cc3ca39932) service to localhost/127.0.0.1:35763 2024-11-13T11:28:27,148 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data9/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:27,148 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/cluster_1fe85d14-7d3c-b2f0-ee31-133b56595fdf/data/data10/current/BP-1464557415-172.17.0.2-1731497263081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:27,149 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:27,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46f5ce7a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:28:27,154 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@cb6fa13{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:27,154 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:27,154 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45fb0d0b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:27,154 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2893dce7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:27,161 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:28:27,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T11:28:27,197 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 79) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$895/0x00007f6c20bf5948.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35763 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$895/0x00007f6c20bf5948.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:35763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35763 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:35763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=127 (was 192), ProcessCount=11 (was 11), AvailableMemoryMB=2826 (was 2826) 2024-11-13T11:28:27,204 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=127, ProcessCount=11, AvailableMemoryMB=2826 2024-11-13T11:28:27,204 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.log.dir so I do NOT create it in target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/96b429f6-cc75-183e-f2a9-ea3fbd26ada0/hadoop.tmp.dir so I do NOT create it in target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8, deleteOnExit=true 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/test.cache.data in system properties and HBase conf 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:28:27,205 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:28:27,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:28:27,205 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/dfs.journalnode.edits.dir in system properties and HBase conf 
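[Editor's note] The entries above show HBaseTestingUtil wiring per-test data directories into system properties and the HBase conf before bringing up a fresh mini cluster for testLogRollOnPipelineRestart. As a point of reference only, here is a minimal sketch of how such a cluster is typically started from a test, assuming the HBase 3.x test utilities named in this log (HBaseTestingUtil, StartMiniClusterOption) and reusing the option values the log reports; it is not the source of this test, and the class/method names outside those logged here are illustrative.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option string logged by HBaseTestingUtil(805):
        // 1 master, 1 region server, 2 data nodes, 1 ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        // Drives the DFS/ZooKeeper/HMaster startup sequence recorded above.
        util.startMiniCluster(option);
        try {
          // ... test body would run against util.getConnection() here ...
        } finally {
          // Shutdown is what the ResourceChecker thread/file-descriptor
          // accounting earlier in this log is measuring.
          util.shutdownMiniCluster();
        }
      }
    }

[End of editor's note; log continues below.]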
2024-11-13T11:28:27,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T11:28:27,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:28:27,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:28:27,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:28:27,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:28:27,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:28:27,218 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:28:27,218 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:28:27,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:27,285 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:27,289 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:27,291 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:27,291 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:27,291 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:28:27,292 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:27,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24dba92e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:27,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37aeaf30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:27,387 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2fe3cc5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-40093-hadoop-hdfs-3_4_1-tests_jar-_-any-5066858877549235701/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:28:27,388 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@512c947f{HTTP/1.1, (http/1.1)}{localhost:40093} 2024-11-13T11:28:27,388 INFO [Time-limited test {}] server.Server(415): Started @149549ms 2024-11-13T11:28:27,400 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:28:27,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:27,450 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:27,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:27,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:27,451 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:28:27,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77877c2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:27,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b6c7e58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:27,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@520d4f99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-38807-hadoop-hdfs-3_4_1-tests_jar-_-any-18289763052385931535/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:27,547 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@798b58c4{HTTP/1.1, (http/1.1)}{localhost:38807} 2024-11-13T11:28:27,547 INFO [Time-limited test {}] server.Server(415): Started @149708ms 2024-11-13T11:28:27,548 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:28:27,574 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:27,579 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:27,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:27,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:27,581 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:28:27,581 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a2c549c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:27,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@245efd98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:27,607 WARN [Thread-1180 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data1/current/BP-2062197654-172.17.0.2-1731497307248/current, will proceed with Du for space computation calculation, 2024-11-13T11:28:27,608 WARN [Thread-1181 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data2/current/BP-2062197654-172.17.0.2-1731497307248/current, will proceed with Du for space computation calculation, 2024-11-13T11:28:27,627 WARN [Thread-1159 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:28:27,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96baff1fd35f7772 with lease ID 0xc90b1c18e7d659cf: Processing first storage report for DS-9b7231eb-85a6-4b16-a665-68c48d6e469e from datanode DatanodeRegistration(127.0.0.1:46509, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=35161, infoSecurePort=0, ipcPort=43077, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248) 2024-11-13T11:28:27,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96baff1fd35f7772 with lease ID 0xc90b1c18e7d659cf: from storage DS-9b7231eb-85a6-4b16-a665-68c48d6e469e node DatanodeRegistration(127.0.0.1:46509, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=35161, infoSecurePort=0, ipcPort=43077, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:27,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96baff1fd35f7772 with lease ID 0xc90b1c18e7d659cf: Processing first storage report for DS-cb4ad007-ab37-46a2-8244-47f4b8c2da3d from datanode DatanodeRegistration(127.0.0.1:46509, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=35161, infoSecurePort=0, ipcPort=43077, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248) 2024-11-13T11:28:27,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96baff1fd35f7772 with lease ID 0xc90b1c18e7d659cf: from storage DS-cb4ad007-ab37-46a2-8244-47f4b8c2da3d node DatanodeRegistration(127.0.0.1:46509, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=35161, infoSecurePort=0, ipcPort=43077, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:27,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:27,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:27,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f93d049{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-37323-hadoop-hdfs-3_4_1-tests_jar-_-any-3915486996496639437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:27,685 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d5149ff{HTTP/1.1, (http/1.1)}{localhost:37323} 2024-11-13T11:28:27,685 INFO [Time-limited test {}] server.Server(415): Started @149846ms 2024-11-13T11:28:27,686 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:28:27,744 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data3/current/BP-2062197654-172.17.0.2-1731497307248/current, will proceed with Du for space computation calculation, 2024-11-13T11:28:27,744 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data4/current/BP-2062197654-172.17.0.2-1731497307248/current, will proceed with Du for space computation calculation, 2024-11-13T11:28:27,760 WARN [Thread-1195 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:28:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xacda4353a7c80124 with lease ID 0xc90b1c18e7d659d0: Processing first storage report for DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2 from datanode DatanodeRegistration(127.0.0.1:35025, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=42423, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248) 2024-11-13T11:28:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xacda4353a7c80124 with lease ID 0xc90b1c18e7d659d0: from storage DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2 node DatanodeRegistration(127.0.0.1:35025, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=42423, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xacda4353a7c80124 with lease ID 0xc90b1c18e7d659d0: Processing first storage report for DS-1b684c56-7a5f-4b6e-b293-0a388a2c321e from datanode DatanodeRegistration(127.0.0.1:35025, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=42423, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248) 2024-11-13T11:28:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xacda4353a7c80124 with lease ID 0xc90b1c18e7d659d0: from storage DS-1b684c56-7a5f-4b6e-b293-0a388a2c321e node DatanodeRegistration(127.0.0.1:35025, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=42423, infoSecurePort=0, ipcPort=41167, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:27,811 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53 2024-11-13T11:28:27,816 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/zookeeper_0, clientPort=60438, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:28:27,817 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60438 2024-11-13T11:28:27,818 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,819 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:28:27,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:28:27,829 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea with version=8 2024-11-13T11:28:27,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:28:27,831 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:28:27,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:28:27,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:28:27,832 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:28:27,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:28:27,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:28:27,832 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:28:27,832 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:28:27,833 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32853 2024-11-13T11:28:27,835 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32853 connecting to ZooKeeper ensemble=127.0.0.1:60438 2024-11-13T11:28:27,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328530x0, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:28:27,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32853-0x10038d78b150000 connected 2024-11-13T11:28:27,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:27,857 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea, hbase.cluster.distributed=false 2024-11-13T11:28:27,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:28:27,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32853 2024-11-13T11:28:27,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32853 2024-11-13T11:28:27,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32853 2024-11-13T11:28:27,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32853 2024-11-13T11:28:27,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32853 2024-11-13T11:28:27,877 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:28:27,877 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:28:27,878 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34633 2024-11-13T11:28:27,879 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34633 connecting to ZooKeeper ensemble=127.0.0.1:60438 2024-11-13T11:28:27,880 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346330x0, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:28:27,885 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34633-0x10038d78b150001 connected 2024-11-13T11:28:27,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:27,885 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:28:27,886 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:28:27,886 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:28:27,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:28:27,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34633 2024-11-13T11:28:27,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34633 2024-11-13T11:28:27,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34633 2024-11-13T11:28:27,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34633 2024-11-13T11:28:27,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34633 2024-11-13T11:28:27,900 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:32853 2024-11-13T11:28:27,901 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,32853,1731497307831 2024-11-13T11:28:27,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:28:27,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:28:27,903 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,32853,1731497307831 2024-11-13T11:28:27,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:28:27,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:27,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:27,904 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:28:27,904 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,32853,1731497307831 from backup master directory 2024-11-13T11:28:27,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,32853,1731497307831 2024-11-13T11:28:27,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:28:27,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:28:27,905 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T11:28:27,905 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,32853,1731497307831 2024-11-13T11:28:27,909 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/hbase.id] with ID: c0d03bc7-a25c-426e-8a3f-a13cfc09257f 2024-11-13T11:28:27,909 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/.tmp/hbase.id 2024-11-13T11:28:27,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:28:27,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:28:27,915 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/.tmp/hbase.id]:[hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/hbase.id] 2024-11-13T11:28:27,927 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:27,928 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T11:28:27,929 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-13T11:28:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:28:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:28:27,938 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:28:27,939 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:28:27,939 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:28:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:28:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:28:27,947 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store 2024-11-13T11:28:27,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:28:27,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:28:27,954 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:28:27,954 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:28:27,954 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:27,954 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:27,954 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:28:27,954 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:27,954 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T11:28:27,954 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497307954Disabling compacts and flushes for region at 1731497307954Disabling writes for close at 1731497307954Writing region close event to WAL at 1731497307954Closed at 1731497307954 2024-11-13T11:28:27,955 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/.initializing 2024-11-13T11:28:27,955 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831 2024-11-13T11:28:27,957 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C32853%2C1731497307831, suffix=, logDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831, archiveDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/oldWALs, maxLogs=10 2024-11-13T11:28:27,957 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C32853%2C1731497307831.1731497307957 2024-11-13T11:28:27,963 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 2024-11-13T11:28:27,964 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42423:42423),(127.0.0.1/127.0.0.1:35161:35161)] 2024-11-13T11:28:27,964 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:28:27,964 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:28:27,965 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,965 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:28:27,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:27,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:27,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:28:27,971 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:27,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:28:27,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:28:27,973 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:27,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:28:27,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:28:27,975 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:27,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:28:27,976 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,976 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,976 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,978 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,978 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,978 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:28:27,979 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:28:27,981 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:28:27,982 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717516, jitterRate=-0.08763226866722107}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:28:27,983 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497307965Initializing all the Stores at 1731497307966 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497307966Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497307968 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497307968Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497307968Cleaning up temporary data from old regions at 1731497307978 (+10 ms)Region opened successfully at 1731497307983 (+5 ms) 2024-11-13T11:28:27,983 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:28:27,986 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cbede56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:28:27,987 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:28:27,987 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:28:27,987 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:28:27,987 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:28:27,988 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T11:28:27,988 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T11:28:27,988 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:28:27,990 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:28:27,991 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:28:27,992 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:28:27,992 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:28:27,992 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:28:27,993 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:28:27,993 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:28:27,994 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:28:27,995 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:28:27,996 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:28:27,996 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:28:27,998 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:28:27,998 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:28:28,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:28,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:28,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,000 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,32853,1731497307831, sessionid=0x10038d78b150000, setting cluster-up flag (Was=false) 2024-11-13T11:28:28,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,004 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:28:28,005 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,32853,1731497307831 2024-11-13T11:28:28,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,010 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:28:28,011 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,32853,1731497307831 2024-11-13T11:28:28,012 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:28:28,013 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:28:28,013 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:28:28,014 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:28:28,014 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,32853,1731497307831 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:28:28,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:28:28,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:28:28,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:28:28,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:28:28,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:28:28,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,018 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:28:28,018 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T11:28:28,019 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:28:28,019 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:28:28,020 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,020 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:28:28,022 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497338022 2024-11-13T11:28:28,022 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:28:28,022 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:28:28,022 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:28:28,023 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:28:28,024 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:28:28,024 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:28:28,024 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497308024,5,FailOnTimeoutGroup] 2024-11-13T11:28:28,027 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497308025,5,FailOnTimeoutGroup] 2024-11-13T11:28:28,027 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,028 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:28:28,028 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,028 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T11:28:28,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:28:28,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:28:28,030 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:28:28,030 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea 2024-11-13T11:28:28,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:28:28,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:28:28,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:28:28,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:28:28,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:28:28,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:28:28,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:28:28,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:28:28,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:28:28,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:28:28,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:28:28,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:28:28,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740 2024-11-13T11:28:28,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740 2024-11-13T11:28:28,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:28:28,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:28:28,049 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
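The store-open entries above echo the column-family attributes dumped with the hbase:meta descriptor (ROWCOL bloom filters, IN_MEMORY, ROW_INDEX_V1 block encoding, 8 KB blocks, 3 versions). As a point of reference, a minimal sketch of assembling a comparable family with the public HBase client API follows; the table name `example_table` is a placeholder, and hbase:meta itself is built internally by the master rather than through this client-side path.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes seen in the log:
    // ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();
    // 'example_table' is a placeholder; ordinary tables are created this way,
    // while the meta descriptor above is produced by the master's bootstrap code.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
  }
}
```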
2024-11-13T11:28:28,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:28:28,051 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:28:28,052 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765070, jitterRate=-0.02716389298439026}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:28:28,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497308037Initializing all the Stores at 1731497308038 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497308038Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497308038Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497308038Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497308038Cleaning up temporary data from old regions at 1731497308048 (+10 ms)Region opened successfully at 1731497308052 (+4 ms) 2024-11-13T11:28:28,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:28:28,053 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:28:28,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:28:28,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:28:28,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:28:28,053 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:28:28,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497308053Disabling compacts and flushes for region at 1731497308053Disabling writes for close at 1731497308053Writing region 
close event to WAL at 1731497308053Closed at 1731497308053 2024-11-13T11:28:28,054 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:28:28,054 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:28:28,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:28:28,056 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:28:28,057 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:28:28,090 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(746): ClusterId : c0d03bc7-a25c-426e-8a3f-a13cfc09257f 2024-11-13T11:28:28,090 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:28:28,092 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:28:28,092 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:28:28,093 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:28:28,093 DEBUG [RS:0;7bf281cf3991:34633 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6852f4c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:28:28,105 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:34633 2024-11-13T11:28:28,105 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:28:28,105 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:28:28,105 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(832): About to register with Master. 
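The regionserver startup above picks up the cluster id and its RPC client settings before registering with the master. An ordinary client reaches the same cluster through the ZooKeeper quorum recorded elsewhere in this log (127.0.0.1:60438 for this test run). A minimal sketch using the standard client API; the quorum and port values are taken from this run and would differ in any other deployment.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum/port as seen in this test run; real clusters list their own ZK ensemble.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "60438");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Cluster id lookup and meta location resolution happen lazily on first use.
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}
```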
2024-11-13T11:28:28,106 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,32853,1731497307831 with port=34633, startcode=1731497307877 2024-11-13T11:28:28,106 DEBUG [RS:0;7bf281cf3991:34633 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:28:28,108 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55111, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:28:28,108 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32853 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,108 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32853 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,110 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea 2024-11-13T11:28:28,110 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43265 2024-11-13T11:28:28,110 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:28:28,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:28:28,112 DEBUG [RS:0;7bf281cf3991:34633 {}] zookeeper.ZKUtil(111): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,112 WARN [RS:0;7bf281cf3991:34633 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:28:28,112 INFO [RS:0;7bf281cf3991:34633 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:28:28,112 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,112 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,34633,1731497307877] 2024-11-13T11:28:28,116 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:28:28,118 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:28:28,118 INFO [RS:0;7bf281cf3991:34633 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:28:28,118 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
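The WAL provider (FSHLogProvider) and the global memstore limit logged above are both configuration-driven. A hedged sketch of the keys most likely behind these values (`hbase.wal.provider` and `hbase.regionserver.global.memstore.size`); the exact key names should be verified against the HBase version in use. The 880 MB limit and 836 MB lower mark in the log are consistent with the limit and its ~95% watermark.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the FSHLog-based provider instantiated in the log;
    // "asyncfs" is the other common choice.
    conf.set("hbase.wal.provider", "filesystem");
    // Fraction of the heap usable by all memstores combined; the logged
    // 880 MB / 836 MB pair corresponds to this limit and its lower watermark.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    return conf;
  }
}
```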
2024-11-13T11:28:28,121 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:28:28,122 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:28:28,122 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,122 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,122 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,122 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,122 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,122 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:28:28,123 DEBUG [RS:0;7bf281cf3991:34633 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:28:28,125 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
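The chore registrations above (CompactionChecker every second, ExecutorStatusChore every minute, CompactedHFilesCleaner every two minutes, and so on) all run through ChoreService. A rough sketch of a custom chore, assuming the ScheduledChore/ChoreService classes behave as their names and the log suggest; the chore name and period here are made up for illustration.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  static class HeartbeatChore extends ScheduledChore {
    HeartbeatChore(Stoppable stopper) {
      // Name and 1000 ms period are illustrative only.
      super("HeartbeatChore", stopper, 1000);
    }
    @Override
    protected void chore() {
      System.out.println("chore tick");
    }
  }

  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // ChoreService schedules and runs the chore on its own thread pool,
    // just as the regionserver does for the chores listed in the log.
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(new HeartbeatChore(stopper));
  }
}
```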
2024-11-13T11:28:28,125 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,126 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,126 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,126 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,126 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34633,1731497307877-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:28:28,140 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:28:28,140 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34633,1731497307877-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,140 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,140 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.Replication(171): 7bf281cf3991,34633,1731497307877 started 2024-11-13T11:28:28,153 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,153 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,34633,1731497307877, RpcServer on 7bf281cf3991/172.17.0.2:34633, sessionid=0x10038d78b150001 2024-11-13T11:28:28,154 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:28:28,154 DEBUG [RS:0;7bf281cf3991:34633 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,154 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,34633,1731497307877' 2024-11-13T11:28:28,154 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:28:28,154 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:28:28,155 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:28:28,155 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:28:28,155 DEBUG [RS:0;7bf281cf3991:34633 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,155 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,34633,1731497307877' 2024-11-13T11:28:28,155 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:28:28,155 DEBUG 
[RS:0;7bf281cf3991:34633 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:28:28,156 DEBUG [RS:0;7bf281cf3991:34633 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:28:28,156 INFO [RS:0;7bf281cf3991:34633 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:28:28,156 INFO [RS:0;7bf281cf3991:34633 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T11:28:28,207 WARN [7bf281cf3991:32853 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T11:28:28,258 INFO [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C34633%2C1731497307877, suffix=, logDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877, archiveDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs, maxLogs=32 2024-11-13T11:28:28,258 INFO [RS:0;7bf281cf3991:34633 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:28,265 INFO [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:28,266 DEBUG [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42423:42423),(127.0.0.1/127.0.0.1:35161:35161)] 2024-11-13T11:28:28,457 DEBUG [7bf281cf3991:32853 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:28:28,458 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,460 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,34633,1731497307877, state=OPENING 2024-11-13T11:28:28,461 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:28:28,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:28,465 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:28:28,465 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:28:28,465 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:28:28,465 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,34633,1731497307877}] 2024-11-13T11:28:28,619 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:28:28,620 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40815, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:28:28,624 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:28:28,624 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:28:28,626 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C34633%2C1731497307877.meta, suffix=.meta, logDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877, archiveDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs, maxLogs=32 2024-11-13T11:28:28,626 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta 2024-11-13T11:28:28,632 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta 2024-11-13T11:28:28,634 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35161:35161),(127.0.0.1/127.0.0.1:42423:42423)] 2024-11-13T11:28:28,635 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:28:28,636 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:28:28,636 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:28:28,636 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
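The meta descriptor carries the MultiRowMutationEndpoint coprocessor (priority 536870911), which the open path above loads from the table descriptor. A short sketch of attaching the same endpoint to an ordinary table descriptor through the client API; the table name is a placeholder.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static TableDescriptor build() throws IOException {
    // Class name copied from the log; registering it in the descriptor makes
    // every region of the table load the endpoint when it opens.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```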
2024-11-13T11:28:28,636 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:28:28,636 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:28:28,636 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:28:28,636 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:28:28,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:28:28,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:28:28,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:28:28,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:28:28,640 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:28:28,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:28:28,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:28:28,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:28:28,642 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:28:28,642 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
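Every store opener above instantiates DefaultStoreFileTracker, matching the 'hbase.store.file-tracker.impl' => 'DEFAULT' metadata in the descriptor. A hedged sketch of selecting the tracker cluster-wide or per table; the "FILE" value is an assumption (the file-based tracker in recent releases) and should be verified against the running version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerSketch {
  public static void main(String[] args) {
    // Cluster-wide default, same key as in the logged descriptor metadata.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");

    // Per-table override via the descriptor's value map ("FILE" assumed to
    // select the file-based tracker; verify for your release).
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .build();
    System.out.println(td);
  }
}
```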
2024-11-13T11:28:28,643 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:28:28,644 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740 2024-11-13T11:28:28,645 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740 2024-11-13T11:28:28,646 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:28:28,646 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:28:28,646 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:28:28,647 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:28:28,648 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876221, jitterRate=0.11417321860790253}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:28:28,648 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:28:28,649 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497308636Writing region info on filesystem at 1731497308636Initializing all the Stores at 1731497308637 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497308637Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497308637Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497308637Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497308637Cleaning up temporary data from old regions at 1731497308646 (+9 ms)Running coprocessor post-open hooks at 1731497308648 (+2 ms)Region opened successfully at 1731497308649 (+1 ms) 2024-11-13T11:28:28,650 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497308619 2024-11-13T11:28:28,652 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:28:28,652 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:28:28,653 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,654 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,34633,1731497307877, state=OPEN 2024-11-13T11:28:28,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:28:28,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:28:28,656 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,656 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:28:28,656 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:28:28,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:28:28,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,34633,1731497307877 in 191 msec 2024-11-13T11:28:28,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:28:28,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-13T11:28:28,662 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:28:28,662 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:28:28,663 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:28:28,663 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,34633,1731497307877, seqNum=-1] 2024-11-13T11:28:28,663 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:28:28,664 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53837, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:28:28,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-13T11:28:28,670 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497308670, completionTime=-1 2024-11-13T11:28:28,670 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:28:28,671 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497368673 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497428673 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,32853,1731497307831-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,32853,1731497307831-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,32853,1731497307831-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:32853, period=300000, unit=MILLISECONDS is enabled. 
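Just above, PEWorker-2 fetches the meta region location from the connection registry before creating the default and hbase namespaces. Client code performs the equivalent lookup through a RegionLocator; a small sketch follows, with the row key purely illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves which regionserver currently hosts the meta row for this key,
      // analogous to the "fetched meta region location" lines in the log.
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("anyRowKey"));
      System.out.println(location.getServerName());
    }
  }
}
```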
2024-11-13T11:28:28,673 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,674 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:28,675 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.772sec 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
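The WARN above shows lease recovery for an old WAL failing because the test's DFSClient was already closed ("Filesystem closed") when RecoverLeaseFSUtils reflectively called isFileClosed. The underlying HDFS calls are recoverLease/isFileClosed on DistributedFileSystem; a hedged sketch of that polling pattern against a live filesystem follows (the WAL path is a placeholder argument).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void recover(Configuration conf, Path walPath) throws Exception {
    FileSystem fs = walPath.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Ask the NameNode to start lease recovery, then poll until the file is closed.
    boolean recovered = dfs.recoverLease(walPath);
    while (!recovered && !dfs.isFileClosed(walPath)) {
      Thread.sleep(1000); // crude fixed backoff for the sketch
      recovered = dfs.recoverLease(walPath);
    }
  }
}
```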
2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,32853,1731497307831-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:28:28,677 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,32853,1731497307831-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:28:28,680 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:28:28,680 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:28:28,680 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,32853,1731497307831-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:28:28,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:28,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d513dbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:28:28,691 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,32853,-1 for getting cluster id 2024-11-13T11:28:28,691 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:28:28,693 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c0d03bc7-a25c-426e-8a3f-a13cfc09257f' 2024-11-13T11:28:28,693 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:28:28,693 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c0d03bc7-a25c-426e-8a3f-a13cfc09257f" 2024-11-13T11:28:28,693 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73dfb030, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:28:28,693 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,32853,-1] 2024-11-13T11:28:28,694 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:28:28,694 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:28,695 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44204, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:28:28,696 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6abc8528, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:28:28,696 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:28:28,697 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,34633,1731497307877, seqNum=-1] 2024-11-13T11:28:28,697 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:28:28,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52692, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:28:28,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7bf281cf3991,32853,1731497307831 2024-11-13T11:28:28,701 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:28:28,703 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:28:28,703 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-13T11:28:28,703 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-13T11:28:28,703 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T11:28:28,704 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 7bf281cf3991,32853,1731497307831 2024-11-13T11:28:28,704 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@63126704 2024-11-13T11:28:28,704 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T11:28:28,707 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44218, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T11:28:28,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T11:28:28,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
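The two TableDescriptorChecker warnings above fire because the test deliberately sets a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) to force frequent flushes and rolls. A sketch of how such per-table values are set on a descriptor; production tables would use far larger numbers.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyRegionDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Values taken from the warnings in the log; both are far below the
        // defaults, which is exactly what TableDescriptorChecker complains about.
        .setMaxFileSize(786432L)
        .setMemStoreFlushSize(8192L)
        .build();
  }
}
```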
2024-11-13T11:28:28,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:28:28,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T11:28:28,710 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T11:28:28,710 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:28,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-13T11:28:28,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:28:28,711 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T11:28:28,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741835_1011 (size=395) 2024-11-13T11:28:28,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741835_1011 (size=395) 2024-11-13T11:28:28,720 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b4b0fc9ac154722e9a6dbdec8f25692b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea 2024-11-13T11:28:28,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741836_1012 (size=78) 2024-11-13T11:28:28,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46509 is added to blk_1073741836_1012 (size=78) 2024-11-13T11:28:28,727 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:28:28,727 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing b4b0fc9ac154722e9a6dbdec8f25692b, disabling compactions & flushes 2024-11-13T11:28:28,727 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:28,727 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:28,727 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. after waiting 0 ms 2024-11-13T11:28:28,727 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:28,727 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:28,728 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for b4b0fc9ac154722e9a6dbdec8f25692b: Waiting for close lock at 1731497308727Disabling compacts and flushes for region at 1731497308727Disabling writes for close at 1731497308727Writing region close event to WAL at 1731497308727Closed at 1731497308727 2024-11-13T11:28:28,729 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T11:28:28,730 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731497308729"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497308729"}]},"ts":"1731497308729"} 2024-11-13T11:28:28,732 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-13T11:28:28,733 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T11:28:28,734 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497308733"}]},"ts":"1731497308733"} 2024-11-13T11:28:28,736 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-13T11:28:28,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b4b0fc9ac154722e9a6dbdec8f25692b, ASSIGN}] 2024-11-13T11:28:28,738 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b4b0fc9ac154722e9a6dbdec8f25692b, ASSIGN 2024-11-13T11:28:28,739 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b4b0fc9ac154722e9a6dbdec8f25692b, ASSIGN; state=OFFLINE, location=7bf281cf3991,34633,1731497307877; forceNewPlan=false, retain=false 2024-11-13T11:28:28,889 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b4b0fc9ac154722e9a6dbdec8f25692b, regionState=OPENING, regionLocation=7bf281cf3991,34633,1731497307877 2024-11-13T11:28:28,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b4b0fc9ac154722e9a6dbdec8f25692b, ASSIGN because future has completed 2024-11-13T11:28:28,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b4b0fc9ac154722e9a6dbdec8f25692b, server=7bf281cf3991,34633,1731497307877}] 2024-11-13T11:28:29,057 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 
2024-11-13T11:28:29,058 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b4b0fc9ac154722e9a6dbdec8f25692b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:28:29,058 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,058 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:28:29,059 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,059 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,061 INFO [StoreOpener-b4b0fc9ac154722e9a6dbdec8f25692b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,063 INFO [StoreOpener-b4b0fc9ac154722e9a6dbdec8f25692b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b4b0fc9ac154722e9a6dbdec8f25692b columnFamilyName info 2024-11-13T11:28:29,063 DEBUG [StoreOpener-b4b0fc9ac154722e9a6dbdec8f25692b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:28:29,064 INFO [StoreOpener-b4b0fc9ac154722e9a6dbdec8f25692b-1 {}] regionserver.HStore(327): Store=b4b0fc9ac154722e9a6dbdec8f25692b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:28:29,064 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,065 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,066 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,067 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,067 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,070 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,072 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:28:29,073 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b4b0fc9ac154722e9a6dbdec8f25692b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832910, jitterRate=0.05910049378871918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:28:29,073 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:29,074 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b4b0fc9ac154722e9a6dbdec8f25692b: Running coprocessor pre-open hook at 1731497309059Writing region info on filesystem at 1731497309059Initializing all the Stores at 1731497309060 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497309060Cleaning up temporary data from old regions at 1731497309067 (+7 ms)Running coprocessor post-open hooks at 1731497309073 (+6 ms)Region opened successfully at 1731497309074 (+1 ms) 2024-11-13T11:28:29,075 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b., pid=6, masterSystemTime=1731497309050 2024-11-13T11:28:29,077 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:29,077 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:29,078 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b4b0fc9ac154722e9a6dbdec8f25692b, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,34633,1731497307877 2024-11-13T11:28:29,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b4b0fc9ac154722e9a6dbdec8f25692b, server=7bf281cf3991,34633,1731497307877 because future has completed 2024-11-13T11:28:29,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T11:28:29,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b4b0fc9ac154722e9a6dbdec8f25692b, server=7bf281cf3991,34633,1731497307877 in 188 msec 2024-11-13T11:28:29,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T11:28:29,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b4b0fc9ac154722e9a6dbdec8f25692b, ASSIGN in 348 msec 2024-11-13T11:28:29,088 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T11:28:29,088 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497309088"}]},"ts":"1731497309088"} 2024-11-13T11:28:29,090 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-13T11:28:29,091 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T11:28:29,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 383 msec 2024-11-13T11:28:29,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:29,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:30,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:30,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:31,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:31,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:32,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:32,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:33,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:33,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:34,140 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:28:34,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:34,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:28:34,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T11:28:34,170 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T11:28:34,170 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-13T11:28:34,170 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:28:34,170 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T11:28:34,171 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T11:28:34,171 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-13T11:28:34,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:34,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:35,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:35,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:36,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:36,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:37,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:37,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:38,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:38,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:38,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32853 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:28:38,762 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-13T11:28:38,762 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-13T11:28:38,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T11:28:38,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:38,773 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b., hostname=7bf281cf3991,34633,1731497307877, seqNum=2] 2024-11-13T11:28:39,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:39,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:40,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:40,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:40,776 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:40,777 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:40,777 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:40,782 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:40,782 WARN [DataStreamer for file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta block BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]) is bad. 2024-11-13T11:28:40,782 WARN [DataStreamer for file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 block BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]) is bad. 2024-11-13T11:28:40,782 WARN [PacketResponder: BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35025] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,782 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:36520 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36520 dst: /127.0.0.1:35025 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:54142 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54142 dst: /127.0.0.1:46509 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:54138 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54138 dst: /127.0.0.1:46509 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f93d049{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:40,783 WARN [DataStreamer for file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 block BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35025,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]) is bad. 
2024-11-13T11:28:40,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_132543015_22 at /127.0.0.1:54100 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54100 dst: /127.0.0.1:46509 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_132543015_22 at /127.0.0.1:36494 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36494 dst: /127.0.0.1:35025 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:36536 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36536 dst: /127.0.0.1:35025 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:40,784 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d5149ff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:40,785 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:40,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@245efd98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:40,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a2c549c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:40,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:28:40,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:40,789 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:40,789 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2062197654-172.17.0.2-1731497307248 (Datanode Uuid cc5b04f4-563b-4ce8-9e4d-2accea5cddaa) service to localhost/127.0.0.1:43265 2024-11-13T11:28:40,790 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data3/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:40,790 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data4/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:40,790 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:40,815 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:40,822 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:40,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:40,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:40,824 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:28:40,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2841d149{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:40,825 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d9d252f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:40,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67e93e07{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-33533-hadoop-hdfs-3_4_1-tests_jar-_-any-12455442101728501892/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:40,966 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b6fee43{HTTP/1.1, (http/1.1)}{localhost:33533} 2024-11-13T11:28:40,966 INFO [Time-limited test {}] server.Server(415): Started @163127ms 2024-11-13T11:28:40,968 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:28:40,989 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:40,989 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:40,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:58442 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58442 dst: /127.0.0.1:46509 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_132543015_22 at /127.0.0.1:58454 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58454 dst: /127.0.0.1:46509 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,989 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:40,991 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:58444 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58444 dst: /127.0.0.1:46509 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:40,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@520d4f99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:40,999 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@798b58c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:40,999 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:41,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b6c7e58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:41,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77877c2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:41,001 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:28:41,001 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:41,001 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:41,001 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2062197654-172.17.0.2-1731497307248 (Datanode Uuid 949331d5-d3b4-4314-a459-0cb433807dfb) service to localhost/127.0.0.1:43265 2024-11-13T11:28:41,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data1/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:41,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data2/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:41,002 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:41,019 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:41,024 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:41,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:41,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:41,026 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:28:41,027 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ec7d32e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:41,027 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dfcaa19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:41,063 WARN [Thread-1330 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:28:41,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55ead61a288ebc6 with lease ID 0xc90b1c18e7d659d1: from storage DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2 node DatanodeRegistration(127.0.0.1:41317, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=41075, infoSecurePort=0, ipcPort=44037, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:41,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55ead61a288ebc6 with lease ID 0xc90b1c18e7d659d1: from storage DS-1b684c56-7a5f-4b6e-b293-0a388a2c321e node DatanodeRegistration(127.0.0.1:41317, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=41075, infoSecurePort=0, ipcPort=44037, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T11:28:41,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4dbdc2b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-33261-hadoop-hdfs-3_4_1-tests_jar-_-any-5569786372154903771/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:41,135 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7006c52a{HTTP/1.1, (http/1.1)}{localhost:33261} 2024-11-13T11:28:41,135 INFO [Time-limited test {}] server.Server(415): Started @163296ms 2024-11-13T11:28:41,136 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:28:41,202 WARN [Thread-1361 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:28:41,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ae15236ecfc3027 with lease ID 0xc90b1c18e7d659d2: from storage DS-9b7231eb-85a6-4b16-a665-68c48d6e469e node DatanodeRegistration(127.0.0.1:36471, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=44167, infoSecurePort=0, ipcPort=40143, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:41,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ae15236ecfc3027 with lease ID 0xc90b1c18e7d659d2: from storage DS-cb4ad007-ab37-46a2-8244-47f4b8c2da3d node DatanodeRegistration(127.0.0.1:36471, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=44167, infoSecurePort=0, ipcPort=40143, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T11:28:41,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:41,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:42,155 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-13T11:28:42,160 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-13T11:28:42,162 ERROR [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:42,162 WARN [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:42,162 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34633%2C1731497307877:(num 1731497308258) roll requested 2024-11-13T11:28:42,162 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:42,167 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 newFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:42,168 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:42,168 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:42,168 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:42,168 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:42,168 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:42,168 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:42,168 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:42,169 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:42,169 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:42,169 WARN [IPC Server handler 4 on default port 43265 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-13T11:28:42,169 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 after 0ms 2024-11-13T11:28:42,170 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44167:44167),(127.0.0.1/127.0.0.1:41075:41075)] 2024-11-13T11:28:42,170 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 is not closed yet, will try archiving it next time 2024-11-13T11:28:42,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:42,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:43,070 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T11:28:43,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:43,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:44,175 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-13T11:28:44,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:44,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:45,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:45,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:46,171 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 after 4002ms 2024-11-13T11:28:46,181 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:46,181 WARN [DataStreamer for file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 block BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36471,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK], DatanodeInfoWithStorage[127.0.0.1:41317,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36471,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]) is bad. 
2024-11-13T11:28:46,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:59470 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59470 dst: /127.0.0.1:36471 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:46,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:47986 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47986 dst: /127.0.0.1:41317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:46,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4dbdc2b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:46,185 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7006c52a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:46,185 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:46,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dfcaa19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:46,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ec7d32e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:46,188 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:46,188 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2062197654-172.17.0.2-1731497307248 (Datanode Uuid 949331d5-d3b4-4314-a459-0cb433807dfb) service to localhost/127.0.0.1:43265 2024-11-13T11:28:46,188 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:28:46,188 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:46,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data1/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:46,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data2/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:46,189 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:46,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:46,205 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:46,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:46,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:46,206 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:28:46,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47748b03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:46,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7838f0c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:46,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78f5fa8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-34663-hadoop-hdfs-3_4_1-tests_jar-_-any-9615928821883093940/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:46,299 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16e84d55{HTTP/1.1, (http/1.1)}{localhost:34663} 2024-11-13T11:28:46,299 INFO [Time-limited test {}] server.Server(415): Started @168460ms 2024-11-13T11:28:46,300 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T11:28:46,317 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:46,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764241520_22 at /127.0.0.1:48002 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48002 dst: /127.0.0.1:41317 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:28:46,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67e93e07{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:46,321 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b6fee43{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:28:46,321 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:28:46,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d9d252f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:28:46,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2841d149{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:28:46,322 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:28:46,322 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2062197654-172.17.0.2-1731497307248 (Datanode Uuid cc5b04f4-563b-4ce8-9e4d-2accea5cddaa) service to localhost/127.0.0.1:43265 2024-11-13T11:28:46,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data3/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:46,323 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data4/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:28:46,323 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:28:46,323 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:28:46,323 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:28:46,335 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:28:46,339 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:28:46,340 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:28:46,340 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:28:46,340 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:28:46,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@494b651f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:28:46,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e0b03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:28:46,396 WARN [Thread-1404 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:28:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa365f4673c1761ca with lease ID 0xc90b1c18e7d659d3: from storage DS-9b7231eb-85a6-4b16-a665-68c48d6e469e node DatanodeRegistration(127.0.0.1:43299, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=38959, infoSecurePort=0, ipcPort=35711, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa365f4673c1761ca with lease ID 0xc90b1c18e7d659d3: from storage DS-cb4ad007-ab37-46a2-8244-47f4b8c2da3d node DatanodeRegistration(127.0.0.1:43299, datanodeUuid=949331d5-d3b4-4314-a459-0cb433807dfb, infoPort=38959, infoSecurePort=0, ipcPort=35711, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:46,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4127de5b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/java.io.tmpdir/jetty-localhost-39609-hadoop-hdfs-3_4_1-tests_jar-_-any-5892808888997159486/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:28:46,464 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5757dea9{HTTP/1.1, (http/1.1)}{localhost:39609} 2024-11-13T11:28:46,464 INFO [Time-limited test {}] server.Server(415): Started @168624ms 2024-11-13T11:28:46,465 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T11:28:46,529 WARN [Thread-1435 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:28:46,531 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8310b25778a6808 with lease ID 0xc90b1c18e7d659d4: from storage DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2 node DatanodeRegistration(127.0.0.1:45251, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=42033, infoSecurePort=0, ipcPort=45903, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:46,531 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8310b25778a6808 with lease ID 0xc90b1c18e7d659d4: from storage DS-1b684c56-7a5f-4b6e-b293-0a388a2c321e node DatanodeRegistration(127.0.0.1:45251, datanodeUuid=cc5b04f4-563b-4ce8-9e4d-2accea5cddaa, infoPort=42033, infoSecurePort=0, ipcPort=45903, storageInfo=lv=-57;cid=testClusterID;nsid=1880148483;c=1731497307248), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:28:46,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:46,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:47,482 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-13T11:28:47,486 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-13T11:28:47,488 ERROR [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41317,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:47,489 WARN [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41317,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:47,489 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34633%2C1731497307877:(num 1731497322162) roll requested 2024-11-13T11:28:47,489 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.1731497327489 2024-11-13T11:28:47,498 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 newFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 2024-11-13T11:28:47,498 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:47,498 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:47,498 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:47,498 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:47,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:47,499 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 2024-11-13T11:28:47,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41317,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:47,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41317,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:47,499 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:47,500 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38959:38959),(127.0.0.1/127.0.0.1:42033:42033)] 2024-11-13T11:28:47,500 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 is not closed yet, will try archiving it next time 2024-11-13T11:28:47,500 WARN [IPC Server handler 2 on default port 43265 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-13T11:28:47,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 after 1ms 2024-11-13T11:28:47,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:47,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:48,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:48,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:49,400 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-13T11:28:49,502 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:49,512 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 newFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:49,512 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:49,513 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:49,513 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:49,513 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:49,513 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:49,513 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:49,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42033:42033),(127.0.0.1/127.0.0.1:38959:38959)] 2024-11-13T11:28:49,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 is not closed yet, will try archiving it next time 2024-11-13T11:28:49,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 is not closed yet, will try archiving it next time 2024-11-13T11:28:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741838_1019 (size=1264) 2024-11-13T11:28:49,516 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:49,517 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:49,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741838_1019 (size=1264) 2024-11-13T11:28:49,517 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 is not closed yet, will try archiving it next time 2024-11-13T11:28:49,517 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 after 0ms 2024-11-13T11:28:49,517 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:49,529 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731497309074/Put/vlen=218/seqid=0] 2024-11-13T11:28:49,530 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731497318774/Put/vlen=1045/seqid=0] 2024-11-13T11:28:49,530 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497308258 2024-11-13T11:28:49,530 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:49,530 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:49,530 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 after 0ms 2024-11-13T11:28:49,530 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:49,534 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731497322162/Put/vlen=1045/seqid=0] 2024-11-13T11:28:49,534 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731497324177/Put/vlen=1045/seqid=0] 2024-11-13T11:28:49,534 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 2024-11-13T11:28:49,534 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 2024-11-13T11:28:49,534 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 2024-11-13T11:28:49,535 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 after 1ms 2024-11-13T11:28:49,535 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497327489 2024-11-13T11:28:49,538 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731497327488/Put/vlen=1045/seqid=0] 2024-11-13T11:28:49,538 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:49,538 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:49,539 WARN [IPC Server handler 0 on default port 43265 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-13T11:28:49,539 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 after 1ms 2024-11-13T11:28:49,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:49,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:50,533 WARN [ResponseProcessor for block BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:50,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_132543015_22 at /127.0.0.1:36692 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36692 dst: /127.0.0.1:45251 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45251 remote=/127.0.0.1:36692]. Total timeout mills is 60000, 58979 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:50,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_132543015_22 at /127.0.0.1:49632 [Receiving block BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43299:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49632 dst: /127.0.0.1:43299 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:28:50,533 WARN [DataStreamer for file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 block BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45251,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:43299,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45251,DS-8cb1cf42-f723-427a-b40f-d5b9ddd4c8b2,DISK]) is bad. 2024-11-13T11:28:50,538 WARN [DataStreamer for file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 block BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:50,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741839_1022 (size=85) 2024-11-13T11:28:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741839_1022 (size=85) 2024-11-13T11:28:50,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:50,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:51,502 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497322162 after 4003ms 2024-11-13T11:28:51,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:51,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:52,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:52,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:53,541 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 after 4003ms 2024-11-13T11:28:53,541 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:53,552 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:53,553 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-13T11:28:53,553 ERROR [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,553 WARN [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,553 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34633%2C1731497307877.meta:.meta(num 1731497308626) roll requested 2024-11-13T11:28:53,554 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.meta.1731497333554.meta 2024-11-13T11:28:53,560 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,560 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,561 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,561 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,561 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,561 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497333554.meta 2024-11-13T11:28:53,561 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,561 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,561 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta 2024-11-13T11:28:53,562 WARN [IPC Server handler 3 on default port 43265 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-13T11:28:53,562 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42033:42033),(127.0.0.1/127.0.0.1:38959:38959)] 2024-11-13T11:28:53,562 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta after 1ms 2024-11-13T11:28:53,562 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta is not closed yet, will try archiving it next time 2024-11-13T11:28:53,577 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/info/8b4635194ee4463d94f3f7238ece4aac is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b./info:regioninfo/1731497309078/Put/seqid=0 2024-11-13T11:28:53,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741841_1025 (size=7125) 2024-11-13T11:28:53,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741841_1025 (size=7125) 2024-11-13T11:28:53,583 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/info/8b4635194ee4463d94f3f7238ece4aac 2024-11-13T11:28:53,601 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/ns/0a9b7cceba4842f6ba35d74a7f3404e2 is 43, key is default/ns:d/1731497308665/Put/seqid=0 2024-11-13T11:28:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45251 is added to blk_1073741842_1026 (size=5153) 2024-11-13T11:28:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741842_1026 (size=5153) 2024-11-13T11:28:53,606 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/ns/0a9b7cceba4842f6ba35d74a7f3404e2 2024-11-13T11:28:53,626 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/table/c59a50c81dee4d2d8ca2440d758cf6f4 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731497309088/Put/seqid=0 2024-11-13T11:28:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741843_1027 (size=5438) 2024-11-13T11:28:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741843_1027 (size=5438) 2024-11-13T11:28:53,631 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/table/c59a50c81dee4d2d8ca2440d758cf6f4 2024-11-13T11:28:53,637 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/info/8b4635194ee4463d94f3f7238ece4aac as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/info/8b4635194ee4463d94f3f7238ece4aac 2024-11-13T11:28:53,644 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/info/8b4635194ee4463d94f3f7238ece4aac, entries=10, sequenceid=11, filesize=7.0 K 2024-11-13T11:28:53,645 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/ns/0a9b7cceba4842f6ba35d74a7f3404e2 as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/ns/0a9b7cceba4842f6ba35d74a7f3404e2 2024-11-13T11:28:53,652 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/ns/0a9b7cceba4842f6ba35d74a7f3404e2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T11:28:53,653 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/.tmp/table/c59a50c81dee4d2d8ca2440d758cf6f4 as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/table/c59a50c81dee4d2d8ca2440d758cf6f4 2024-11-13T11:28:53,659 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/table/c59a50c81dee4d2d8ca2440d758cf6f4, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T11:28:53,660 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false 2024-11-13T11:28:53,660 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T11:28:53,660 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b4b0fc9ac154722e9a6dbdec8f25692b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-13T11:28:53,660 ERROR [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,661 WARN [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea-prefix:7bf281cf3991,34633,1731497307877 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,661 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C34633%2C1731497307877:(num 1731497329502) roll requested 2024-11-13T11:28:53,661 INFO [regionserver/7bf281cf3991:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34633%2C1731497307877.1731497333661 2024-11-13T11:28:53,666 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 newFile=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497333661 2024-11-13T11:28:53,666 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,666 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,667 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497333661 2024-11-13T11:28:53,667 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:53,667 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38959:38959),(127.0.0.1/127.0.0.1:42033:42033)] 2024-11-13T11:28:53,667 DEBUG [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 is not closed yet, will try archiving it next time 2024-11-13T11:28:53,667 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2062197654-172.17.0.2-1731497307248:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:53,668 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:53,668 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 after 0ms 2024-11-13T11:28:53,669 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.1731497329502 to hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs/7bf281cf3991%2C34633%2C1731497307877.1731497329502 2024-11-13T11:28:53,682 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/.tmp/info/b343d08a90bb4920b7bf33830cc208c2 is 1080, key is row1002/info:/1731497318774/Put/seqid=0 2024-11-13T11:28:53,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741845_1029 (size=9270) 2024-11-13T11:28:53,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741845_1029 (size=9270) 2024-11-13T11:28:53,687 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/.tmp/info/b343d08a90bb4920b7bf33830cc208c2 2024-11-13T11:28:53,693 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/.tmp/info/b343d08a90bb4920b7bf33830cc208c2 as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/info/b343d08a90bb4920b7bf33830cc208c2 2024-11-13T11:28:53,698 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/info/b343d08a90bb4920b7bf33830cc208c2, entries=4, sequenceid=8, filesize=9.1 K 2024-11-13T11:28:53,699 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for b4b0fc9ac154722e9a6dbdec8f25692b in 39ms, sequenceid=8, compaction requested=false 2024-11-13T11:28:53,699 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status 
journal for b4b0fc9ac154722e9a6dbdec8f25692b: 2024-11-13T11:28:53,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:53,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T11:28:53,704 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T11:28:53,704 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:53,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:53,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:53,704 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T11:28:53,704 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T11:28:53,705 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1498404850, stopped=false 2024-11-13T11:28:53,705 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,32853,1731497307831 2024-11-13T11:28:53,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:53,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:53,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:28:53,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:53,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:53,706 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:28:53,706 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T11:28:53,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:53,706 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:53,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:28:53,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:53,707 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,34633,1731497307877' ***** 2024-11-13T11:28:53,707 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T11:28:53,707 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(3091): Received CLOSE for b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,34633,1731497307877 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:28:53,707 INFO [RS:0;7bf281cf3991:34633 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:34633. 
2024-11-13T11:28:53,708 DEBUG [RS:0;7bf281cf3991:34633 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b4b0fc9ac154722e9a6dbdec8f25692b, disabling compactions & flushes 2024-11-13T11:28:53,708 DEBUG [RS:0;7bf281cf3991:34633 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:53,708 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:53,708 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:28:53,708 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:28:53,708 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. after waiting 0 ms 2024-11-13T11:28:53,708 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 
2024-11-13T11:28:53,708 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T11:28:53,708 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b4b0fc9ac154722e9a6dbdec8f25692b=TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b.} 2024-11-13T11:28:53,708 DEBUG [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b4b0fc9ac154722e9a6dbdec8f25692b 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:28:53,708 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:28:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:28:53,713 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/default/TestLogRolling-testLogRollOnPipelineRestart/b4b0fc9ac154722e9a6dbdec8f25692b/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-13T11:28:53,714 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T11:28:53,714 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 
2024-11-13T11:28:53,714 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:28:53,714 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b4b0fc9ac154722e9a6dbdec8f25692b: Waiting for close lock at 1731497333707Running coprocessor pre-close hooks at 1731497333707Disabling compacts and flushes for region at 1731497333707Disabling writes for close at 1731497333708 (+1 ms)Writing region close event to WAL at 1731497333708Running coprocessor post-close hooks at 1731497333714 (+6 ms)Closed at 1731497333714 2024-11-13T11:28:53,715 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:28:53,715 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497333708Running coprocessor pre-close hooks at 1731497333708Disabling compacts and flushes for region at 1731497333708Disabling writes for close at 1731497333708Writing region close event to WAL at 1731497333709 (+1 ms)Running coprocessor post-close hooks at 1731497333714 (+5 ms)Closed at 1731497333714 2024-11-13T11:28:53,715 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731497308707.b4b0fc9ac154722e9a6dbdec8f25692b. 2024-11-13T11:28:53,715 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T11:28:53,908 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,34633,1731497307877; all regions closed. 
2024-11-13T11:28:53,910 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,910 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,910 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,911 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,911 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:53,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741840_1023 (size=825) 2024-11-13T11:28:53,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741840_1023 (size=825) 2024-11-13T11:28:53,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:28:53,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:28:53,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T11:28:54,127 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:28:54,206 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T11:28:54,206 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T11:28:54,533 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-13T11:28:54,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:54,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:55,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:55,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:56,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:56,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:28:57,563 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta after 4002ms 2024-11-13T11:28:57,564 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/WALs/7bf281cf3991,34633,1731497307877/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta to hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs/7bf281cf3991%2C34633%2C1731497307877.meta.1731497308626.meta 2024-11-13T11:28:57,569 DEBUG [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs 2024-11-13T11:28:57,569 INFO [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C34633%2C1731497307877.meta:.meta(num 1731497333554) 2024-11-13T11:28:57,570 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,570 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,571 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,571 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,571 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741844_1028 (size=1162) 2024-11-13T11:28:57,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741844_1028 (size=1162) 2024-11-13T11:28:57,582 DEBUG [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs 2024-11-13T11:28:57,582 INFO [RS:0;7bf281cf3991:34633 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C34633%2C1731497307877:(num 1731497333661) 2024-11-13T11:28:57,582 DEBUG [RS:0;7bf281cf3991:34633 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:28:57,582 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:28:57,582 INFO [RS:0;7bf281cf3991:34633 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:28:57,582 INFO [RS:0;7bf281cf3991:34633 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T11:28:57,583 INFO [RS:0;7bf281cf3991:34633 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:28:57,583 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T11:28:57,583 INFO [RS:0;7bf281cf3991:34633 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34633 2024-11-13T11:28:57,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,34633,1731497307877 2024-11-13T11:28:57,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:28:57,585 INFO [RS:0;7bf281cf3991:34633 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:28:57,586 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,34633,1731497307877] 2024-11-13T11:28:57,587 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,34633,1731497307877 already deleted, retry=false 2024-11-13T11:28:57,587 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,34633,1731497307877 expired; onlineServers=0 2024-11-13T11:28:57,587 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,32853,1731497307831' ***** 2024-11-13T11:28:57,587 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T11:28:57,587 INFO [M:0;7bf281cf3991:32853 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:28:57,587 INFO [M:0;7bf281cf3991:32853 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:28:57,587 DEBUG [M:0;7bf281cf3991:32853 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T11:28:57,588 DEBUG [M:0;7bf281cf3991:32853 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:28:57,588 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T11:28:57,588 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497308025 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497308025,5,FailOnTimeoutGroup] 2024-11-13T11:28:57,588 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497308024 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497308024,5,FailOnTimeoutGroup] 2024-11-13T11:28:57,588 INFO [M:0;7bf281cf3991:32853 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:28:57,588 INFO [M:0;7bf281cf3991:32853 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:28:57,588 DEBUG [M:0;7bf281cf3991:32853 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:28:57,588 INFO [M:0;7bf281cf3991:32853 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:28:57,588 INFO [M:0;7bf281cf3991:32853 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:28:57,589 INFO [M:0;7bf281cf3991:32853 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:28:57,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:28:57,589 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T11:28:57,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:28:57,589 DEBUG [M:0;7bf281cf3991:32853 {}] zookeeper.ZKUtil(347): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T11:28:57,589 WARN [M:0;7bf281cf3991:32853 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T11:28:57,590 INFO [M:0;7bf281cf3991:32853 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/.lastflushedseqids 2024-11-13T11:28:57,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741846_1030 (size=139) 2024-11-13T11:28:57,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741846_1030 (size=139) 2024-11-13T11:28:57,595 INFO [M:0;7bf281cf3991:32853 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:28:57,595 INFO [M:0;7bf281cf3991:32853 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:28:57,595 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:28:57,595 INFO [M:0;7bf281cf3991:32853 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:57,595 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:57,595 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:28:57,595 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:57,595 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-13T11:28:57,595 ERROR [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData-prefix:7bf281cf3991,32853,1731497307831 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:57,595 WARN [FSHLog-0-hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData-prefix:7bf281cf3991,32853,1731497307831 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:57,596 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7bf281cf3991%2C32853%2C1731497307831:(num 1731497307957) roll requested 2024-11-13T11:28:57,596 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C32853%2C1731497307831.1731497337596 2024-11-13T11:28:57,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,601 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,601 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497337596 2024-11-13T11:28:57,601 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T11:28:57,601 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46509,DS-9b7231eb-85a6-4b16-a665-68c48d6e469e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T11:28:57,601 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 2024-11-13T11:28:57,602 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42033:42033),(127.0.0.1/127.0.0.1:38959:38959)] 2024-11-13T11:28:57,602 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 is not closed yet, will try archiving it next time 2024-11-13T11:28:57,602 WARN [IPC Server handler 3 on default port 43265 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-13T11:28:57,602 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 after 1ms 2024-11-13T11:28:57,615 DEBUG [M:0;7bf281cf3991:32853 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c110260b81c4e20a2141c8dbc8da7ea is 82, key is hbase:meta,,1/info:regioninfo/1731497308653/Put/seqid=0 2024-11-13T11:28:57,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741848_1033 (size=5672) 2024-11-13T11:28:57,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741848_1033 (size=5672) 2024-11-13T11:28:57,620 INFO [M:0;7bf281cf3991:32853 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c110260b81c4e20a2141c8dbc8da7ea 2024-11-13T11:28:57,639 DEBUG [M:0;7bf281cf3991:32853 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce9528024fe149cc9693c3be5f194a6a is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731497309092/Put/seqid=0 2024-11-13T11:28:57,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741849_1034 (size=6119) 2024-11-13T11:28:57,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741849_1034 (size=6119) 2024-11-13T11:28:57,644 INFO [M:0;7bf281cf3991:32853 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce9528024fe149cc9693c3be5f194a6a 2024-11-13T11:28:57,663 DEBUG [M:0;7bf281cf3991:32853 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e551e39d16544b68f060e0786e8e42a is 69, key is 7bf281cf3991,34633,1731497307877/rs:state/1731497308109/Put/seqid=0 2024-11-13T11:28:57,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741850_1035 (size=5156) 2024-11-13T11:28:57,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741850_1035 (size=5156) 2024-11-13T11:28:57,668 INFO [M:0;7bf281cf3991:32853 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e551e39d16544b68f060e0786e8e42a 2024-11-13T11:28:57,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:57,686 INFO [RS:0;7bf281cf3991:34633 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:28:57,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34633-0x10038d78b150001, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:28:57,686 INFO [RS:0;7bf281cf3991:34633 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,34633,1731497307877; zookeeper connection closed. 
2024-11-13T11:28:57,687 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5851bf8c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5851bf8c 2024-11-13T11:28:57,687 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T11:28:57,689 DEBUG [M:0;7bf281cf3991:32853 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5b6d4b28401f421896007ad1f094a084 is 52, key is load_balancer_on/state:d/1731497308702/Put/seqid=0 2024-11-13T11:28:57,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741851_1036 (size=5056) 2024-11-13T11:28:57,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741851_1036 (size=5056) 2024-11-13T11:28:57,694 INFO [M:0;7bf281cf3991:32853 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5b6d4b28401f421896007ad1f094a084 2024-11-13T11:28:57,699 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c110260b81c4e20a2141c8dbc8da7ea as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c110260b81c4e20a2141c8dbc8da7ea 2024-11-13T11:28:57,705 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c110260b81c4e20a2141c8dbc8da7ea, entries=8, sequenceid=56, filesize=5.5 K 2024-11-13T11:28:57,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:57,706 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce9528024fe149cc9693c3be5f194a6a as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce9528024fe149cc9693c3be5f194a6a 2024-11-13T11:28:57,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:57,712 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce9528024fe149cc9693c3be5f194a6a, entries=6, sequenceid=56, filesize=6.0 K 2024-11-13T11:28:57,712 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e551e39d16544b68f060e0786e8e42a as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e551e39d16544b68f060e0786e8e42a 2024-11-13T11:28:57,718 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e551e39d16544b68f060e0786e8e42a, entries=1, sequenceid=56, filesize=5.0 K 2024-11-13T11:28:57,718 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5b6d4b28401f421896007ad1f094a084 as hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5b6d4b28401f421896007ad1f094a084 2024-11-13T11:28:57,724 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5b6d4b28401f421896007ad1f094a084, entries=1, sequenceid=56, filesize=4.9 K 2024-11-13T11:28:57,725 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=56, compaction requested=false 2024-11-13T11:28:57,726 INFO [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:28:57,726 DEBUG [M:0;7bf281cf3991:32853 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497337595Disabling compacts and flushes for region at 1731497337595Disabling writes for close at 1731497337595Obtaining lock to block concurrent updates at 1731497337595Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497337595Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731497337595Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731497337602 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497337602Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497337615 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497337615Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497337625 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497337638 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497337638Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497337648 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497337662 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497337662Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497337672 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497337688 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497337689 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9b8bb63: reopening flushed file at 1731497337699 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cb0f972: reopening flushed file at 1731497337705 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3af80b9d: reopening flushed file at 1731497337712 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f4c7c7b: reopening flushed file at 1731497337718 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=56, compaction requested=false at 1731497337725 (+7 ms)Writing region close event to WAL at 1731497337726 (+1 ms)Closed at 1731497337726 2024-11-13T11:28:57,726 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,726 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,726 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,726 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:28:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43299 is added to blk_1073741847_1031 (size=757) 2024-11-13T11:28:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45251 is added to blk_1073741847_1031 (size=757) 2024-11-13T11:28:57,810 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T11:28:58,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:58,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:58,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:58,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,246 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:28:59,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,251 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:28:59,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:28:59,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:00,534 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T11:29:00,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:00,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:01,603 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 after 4002ms 2024-11-13T11:29:01,604 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/WALs/7bf281cf3991,32853,1731497307831/7bf281cf3991%2C32853%2C1731497307831.1731497307957 to hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/oldWALs/7bf281cf3991%2C32853%2C1731497307831.1731497307957 2024-11-13T11:29:01,612 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/MasterData/oldWALs/7bf281cf3991%2C32853%2C1731497307831.1731497307957 to hdfs://localhost:43265/user/jenkins/test-data/5db5e082-d82e-eab6-f053-6f42ed5ac8ea/oldWALs/7bf281cf3991%2C32853%2C1731497307831.1731497307957$masterlocalwal$ 2024-11-13T11:29:01,612 INFO [M:0;7bf281cf3991:32853 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T11:29:01,612 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:29:01,612 INFO [M:0;7bf281cf3991:32853 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32853 2024-11-13T11:29:01,613 INFO [M:0;7bf281cf3991:32853 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:29:01,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:01,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:01,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:29:01,715 INFO [M:0;7bf281cf3991:32853 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:29:01,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32853-0x10038d78b150000, quorum=127.0.0.1:60438, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:29:01,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4127de5b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:01,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5757dea9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:29:01,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:29:01,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e0b03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:29:01,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@494b651f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:29:01,725 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:29:01,725 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:29:01,725 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:29:01,725 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2062197654-172.17.0.2-1731497307248 (Datanode Uuid cc5b04f4-563b-4ce8-9e4d-2accea5cddaa) service to localhost/127.0.0.1:43265 2024-11-13T11:29:01,726 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data3/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:01,726 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data4/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:01,727 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:29:01,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78f5fa8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:01,730 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16e84d55{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:29:01,730 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:29:01,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7838f0c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:29:01,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47748b03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:29:01,731 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:29:01,731 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:29:01,731 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:29:01,731 WARN [BP-2062197654-172.17.0.2-1731497307248 heartbeating to localhost/127.0.0.1:43265 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2062197654-172.17.0.2-1731497307248 (Datanode Uuid 949331d5-d3b4-4314-a459-0cb433807dfb) service to localhost/127.0.0.1:43265 2024-11-13T11:29:01,732 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data1/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:01,732 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/cluster_fa1b88cd-72d9-e50c-8179-5c107543bce8/data/data2/current/BP-2062197654-172.17.0.2-1731497307248 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:01,732 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:29:01,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2fe3cc5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:29:01,737 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@512c947f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:29:01,737 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:29:01,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37aeaf30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:29:01,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24dba92e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir/,STOPPED} 2024-11-13T11:29:01,742 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:29:01,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T11:29:01,765 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43265 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43265 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43265 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43265 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:43265 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43265 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43265 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43265 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=113 (was 127), ProcessCount=11 (was 11), AvailableMemoryMB=2193 (was 2826) 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=113, ProcessCount=11, AvailableMemoryMB=2193 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.log.dir so I do NOT create it in target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01afac56-c867-d5ed-015d-6e86b28abb53/hadoop.tmp.dir so I do NOT create it in target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922, deleteOnExit=true 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/test.cache.data in system properties and HBase conf 2024-11-13T11:29:01,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:29:01,773 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:29:01,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:29:01,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T11:29:01,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:29:01,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:29:01,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:29:01,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:29:01,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:29:01,787 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:29:01,834 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:01,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:29:01,845 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:29:01,845 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:29:01,845 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:29:01,846 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:01,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18e0f0d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:29:01,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b6c619b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:29:01,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@593778be{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/java.io.tmpdir/jetty-localhost-41111-hadoop-hdfs-3_4_1-tests_jar-_-any-335461647459955323/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:29:01,941 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@391ebee0{HTTP/1.1, (http/1.1)}{localhost:41111} 2024-11-13T11:29:01,941 INFO [Time-limited test {}] server.Server(415): Started @184102ms 2024-11-13T11:29:01,953 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:29:01,995 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:01,999 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:29:02,002 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:29:02,002 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:29:02,002 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T11:29:02,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73038ac2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:29:02,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f373d12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:29:02,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@403776ad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/java.io.tmpdir/jetty-localhost-41751-hadoop-hdfs-3_4_1-tests_jar-_-any-14334666849940342320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:02,097 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43fad179{HTTP/1.1, (http/1.1)}{localhost:41751} 2024-11-13T11:29:02,097 INFO [Time-limited test {}] server.Server(415): Started @184257ms 2024-11-13T11:29:02,098 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:29:02,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:02,124 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:29:02,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:29:02,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:29:02,125 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:29:02,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5090366a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:29:02,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc4d1f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:29:02,151 WARN [Thread-1630 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data2/current/BP-605604222-172.17.0.2-1731497341796/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:02,151 WARN [Thread-1629 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data1/current/BP-605604222-172.17.0.2-1731497341796/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:02,166 WARN [Thread-1608 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:29:02,168 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbaec0aa617a558e6 with lease ID 0x45ba351892a78203: Processing first storage report for DS-51a90119-1a19-4731-83d4-5faf8103ea57 from datanode DatanodeRegistration(127.0.0.1:36997, datanodeUuid=6663ed8d-0bcf-43d3-bd0f-9ca84b10c6a6, infoPort=46607, infoSecurePort=0, ipcPort=37951, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796) 2024-11-13T11:29:02,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbaec0aa617a558e6 with lease ID 0x45ba351892a78203: from storage DS-51a90119-1a19-4731-83d4-5faf8103ea57 node DatanodeRegistration(127.0.0.1:36997, datanodeUuid=6663ed8d-0bcf-43d3-bd0f-9ca84b10c6a6, infoPort=46607, infoSecurePort=0, ipcPort=37951, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:02,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbaec0aa617a558e6 with lease ID 0x45ba351892a78203: Processing first storage report for DS-f701b98f-6c63-4ae1-9808-3de28d5901ba from datanode DatanodeRegistration(127.0.0.1:36997, datanodeUuid=6663ed8d-0bcf-43d3-bd0f-9ca84b10c6a6, infoPort=46607, infoSecurePort=0, ipcPort=37951, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796) 2024-11-13T11:29:02,169 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbaec0aa617a558e6 with lease ID 0x45ba351892a78203: from storage DS-f701b98f-6c63-4ae1-9808-3de28d5901ba node DatanodeRegistration(127.0.0.1:36997, datanodeUuid=6663ed8d-0bcf-43d3-bd0f-9ca84b10c6a6, infoPort=46607, infoSecurePort=0, ipcPort=37951, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:02,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25901f18{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/java.io.tmpdir/jetty-localhost-46739-hadoop-hdfs-3_4_1-tests_jar-_-any-18023282223593273626/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:02,220 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37f050e6{HTTP/1.1, (http/1.1)}{localhost:46739} 2024-11-13T11:29:02,220 INFO [Time-limited test {}] server.Server(415): Started @184381ms 2024-11-13T11:29:02,221 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
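The "Potentially hanging thread" listing earlier in this output is a snapshot of live threads and their stacks taken around the resource check. As a rough illustration only (this is not HBase's actual ResourceChecker code), such a snapshot can be produced with plain JDK calls:

    import java.util.Map;

    public final class ThreadSnapshot {
        // Print every live thread and its current stack, similar in spirit to the
        // "Potentially hanging thread" entries emitted around the resource check.
        public static void dumpLiveThreads() {
            Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
            for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
                System.out.println("Potentially hanging thread: " + entry.getKey().getName());
                for (StackTraceElement frame : entry.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }

        public static void main(String[] args) {
            dumpLiveThreads();
        }
    }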
2024-11-13T11:29:02,273 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data3/current/BP-605604222-172.17.0.2-1731497341796/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:02,273 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data4/current/BP-605604222-172.17.0.2-1731497341796/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:02,292 WARN [Thread-1644 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:29:02,294 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaab0128a1d254e52 with lease ID 0x45ba351892a78204: Processing first storage report for DS-859ef0d3-88c3-458c-9ef8-70cdbff808c3 from datanode DatanodeRegistration(127.0.0.1:34305, datanodeUuid=9af81504-50a9-49e7-8c91-fb409e0eac2b, infoPort=37589, infoSecurePort=0, ipcPort=43199, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796) 2024-11-13T11:29:02,294 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaab0128a1d254e52 with lease ID 0x45ba351892a78204: from storage DS-859ef0d3-88c3-458c-9ef8-70cdbff808c3 node DatanodeRegistration(127.0.0.1:34305, datanodeUuid=9af81504-50a9-49e7-8c91-fb409e0eac2b, infoPort=37589, infoSecurePort=0, ipcPort=43199, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:02,294 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaab0128a1d254e52 with lease ID 0x45ba351892a78204: Processing first storage report for DS-031cc660-8ac5-4730-84fe-707fe85c0391 from datanode DatanodeRegistration(127.0.0.1:34305, datanodeUuid=9af81504-50a9-49e7-8c91-fb409e0eac2b, infoPort=37589, infoSecurePort=0, ipcPort=43199, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796) 2024-11-13T11:29:02,294 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaab0128a1d254e52 with lease ID 0x45ba351892a78204: from storage DS-031cc660-8ac5-4730-84fe-707fe85c0391 node DatanodeRegistration(127.0.0.1:34305, datanodeUuid=9af81504-50a9-49e7-8c91-fb409e0eac2b, infoPort=37589, infoSecurePort=0, ipcPort=43199, storageInfo=lv=-57;cid=testClusterID;nsid=1789460410;c=1731497341796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:02,343 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe 2024-11-13T11:29:02,347 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/zookeeper_0, clientPort=56388, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:29:02,349 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56388 2024-11-13T11:29:02,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:29:02,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:29:02,361 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270 with version=8 2024-11-13T11:29:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:29:02,363 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:29:02,363 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:29:02,364 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44663 2024-11-13T11:29:02,365 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44663 connecting to ZooKeeper ensemble=127.0.0.1:56388 2024-11-13T11:29:02,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446630x0, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:29:02,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44663-0x10038d811f80000 connected 2024-11-13T11:29:02,383 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,386 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:29:02,386 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270, hbase.cluster.distributed=false 2024-11-13T11:29:02,388 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:29:02,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44663 2024-11-13T11:29:02,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44663 2024-11-13T11:29:02,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44663 2024-11-13T11:29:02,389 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44663 2024-11-13T11:29:02,389 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44663 2024-11-13T11:29:02,406 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:29:02,406 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:29:02,407 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35385 2024-11-13T11:29:02,408 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35385 connecting to ZooKeeper ensemble=127.0.0.1:56388 2024-11-13T11:29:02,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:353850x0, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:29:02,413 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:29:02,413 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35385-0x10038d811f80001 connected 2024-11-13T11:29:02,413 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:29:02,414 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:29:02,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:29:02,415 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:29:02,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35385 2024-11-13T11:29:02,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35385 2024-11-13T11:29:02,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35385 2024-11-13T11:29:02,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35385 2024-11-13T11:29:02,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35385 
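The RecoverableZooKeeper/ZKUtil entries above show the master and region server connecting to the test ensemble at 127.0.0.1:56388 and setting watches on znodes such as /hbase/running before they exist. A bare-bones sketch of the same pattern with the plain ZooKeeper client (illustrative only; HBase wraps this in RecoverableZooKeeper with retry handling):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class ZkWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // Ensemble address taken from the log; the watcher just waits for SyncConnected.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:56388", 30_000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();
            // exists() registers a watch even when the znode is absent, which is what
            // "Set watcher on znode that does not yet exist, /hbase/running" refers to.
            zk.exists("/hbase/running", true);
            zk.close();
        }
    }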
2024-11-13T11:29:02,428 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:44663 2024-11-13T11:29:02,428 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:02,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:02,430 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:29:02,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,431 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:29:02,431 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,44663,1731497342363 from backup master directory 2024-11-13T11:29:02,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:02,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:02,432 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T11:29:02,432 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,436 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/hbase.id] with ID: 1ebf1fc4-8fc2-4af9-bf72-2663dd57e273 2024-11-13T11:29:02,436 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/.tmp/hbase.id 2024-11-13T11:29:02,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:29:02,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:29:02,441 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/.tmp/hbase.id]:[hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/hbase.id] 2024-11-13T11:29:02,452 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:02,452 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T11:29:02,453 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
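The FSUtils entries above create the cluster ID file by writing hbase.id to a temporary location and then moving it to its final path, so readers never observe a half-written file. A simplified sketch of that write-then-rename pattern with the Hadoop FileSystem API (the paths and the ID value below are placeholders, not the ones from this run):

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:38965");  // NameNode address from the log
            FileSystem fs = FileSystem.get(conf);

            // Placeholder paths; the real run uses a generated test-data directory.
            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
            Path target = new Path("/user/jenkins/test-data/hbase.id");

            // 1. Write the ID to a temporary file ...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
            }
            // 2. ... then move it into place so readers never see a partial file.
            if (!fs.rename(tmp, target)) {
                throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
            }
        }
    }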
2024-11-13T11:29:02,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:29:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:29:02,468 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:29:02,469 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:29:02,469 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:29:02,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:29:02,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:29:02,477 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store 2024-11-13T11:29:02,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:29:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:29:02,486 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:02,486 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:29:02,486 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:02,486 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:02,486 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:29:02,486 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:02,486 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
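The master:store descriptor logged above declares four column families (info, proc, rs, state), each with its own versions, block size, encoding and bloom filter settings. As a rough illustration, the 'info' family's settings could be expressed with the public client-side descriptor builders like this (a sketch only, not the code MasterRegion actually runs; only one family is shown):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
        // Mirrors the logged 'info' family: VERSIONS=3, IN_MEMORY=true,
        // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL, BLOCKSIZE=8KB.
        public static TableDescriptor infoOnlyDescriptor() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setBlocksize(8 * 1024)
                    .build())
                .build();
        }
    }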
2024-11-13T11:29:02,486 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497342486Disabling compacts and flushes for region at 1731497342486Disabling writes for close at 1731497342486Writing region close event to WAL at 1731497342486Closed at 1731497342486 2024-11-13T11:29:02,487 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/.initializing 2024-11-13T11:29:02,488 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/WALs/7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,490 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C44663%2C1731497342363, suffix=, logDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/WALs/7bf281cf3991,44663,1731497342363, archiveDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/oldWALs, maxLogs=10 2024-11-13T11:29:02,491 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C44663%2C1731497342363.1731497342491 2024-11-13T11:29:02,496 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/WALs/7bf281cf3991,44663,1731497342363/7bf281cf3991%2C44663%2C1731497342363.1731497342491 2024-11-13T11:29:02,496 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37589:37589),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-13T11:29:02,498 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:29:02,498 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:02,499 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,499 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:29:02,502 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:02,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:29:02,504 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:02,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:29:02,505 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:02,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:29:02,507 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:02,507 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,508 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,508 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,510 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,510 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,510 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:29:02,511 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:02,513 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:29:02,514 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795396, jitterRate=0.011399194598197937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:29:02,514 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497342499Initializing all the Stores at 1731497342499Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497342499Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497342501 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497342501Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497342501Cleaning up temporary data from old regions at 1731497342510 (+9 ms)Region opened successfully at 1731497342514 (+4 ms) 2024-11-13T11:29:02,515 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:29:02,518 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2033855c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:29:02,519 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:29:02,519 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:29:02,519 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:29:02,519 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:29:02,520 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T11:29:02,520 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T11:29:02,520 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:29:02,522 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:29:02,522 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:29:02,523 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:29:02,523 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:29:02,524 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:29:02,524 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:29:02,525 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:29:02,526 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:29:02,526 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:29:02,527 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:29:02,528 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:29:02,529 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:29:02,530 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:29:02,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:29:02,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:29:02,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,531 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,44663,1731497342363, sessionid=0x10038d811f80000, setting cluster-up flag (Was=false) 2024-11-13T11:29:02,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,535 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:29:02,536 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,540 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:29:02,540 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,44663,1731497342363 2024-11-13T11:29:02,541 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:29:02,543 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:02,543 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:29:02,543 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:29:02,544 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,44663,1731497342363 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:29:02,545 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497372546 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:29:02,546 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,547 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:02,547 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:29:02,547 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:29:02,547 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:29:02,547 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:29:02,547 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:29:02,547 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:29:02,547 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497342547,5,FailOnTimeoutGroup] 2024-11-13T11:29:02,548 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497342547,5,FailOnTimeoutGroup] 2024-11-13T11:29:02,548 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,548 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:29:02,548 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,548 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,548 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,548 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:29:02,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:29:02,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:29:02,556 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:29:02,557 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270 2024-11-13T11:29:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:29:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:29:02,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:02,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:29:02,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:29:02,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:02,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:29:02,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:29:02,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:02,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:29:02,569 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:29:02,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:02,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:29:02,571 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:29:02,571 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:02,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:02,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:29:02,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740 2024-11-13T11:29:02,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740 2024-11-13T11:29:02,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:29:02,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:29:02,575 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T11:29:02,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:29:02,579 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:29:02,579 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805941, jitterRate=0.024807274341583252}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:29:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497342563Initializing all the Stores at 1731497342564 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497342564Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497342564Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497342564Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497342564Cleaning up temporary data from old regions at 1731497342575 (+11 ms)Region opened successfully at 1731497342580 (+5 ms) 2024-11-13T11:29:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:29:02,580 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:29:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:29:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:29:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:29:02,581 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:29:02,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497342580Disabling compacts and flushes for region at 1731497342580Disabling writes for close at 1731497342580Writing region 
close event to WAL at 1731497342581 (+1 ms)Closed at 1731497342581 2024-11-13T11:29:02,582 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:02,582 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:29:02,583 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:29:02,584 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:29:02,585 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:29:02,618 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(746): ClusterId : 1ebf1fc4-8fc2-4af9-bf72-2663dd57e273 2024-11-13T11:29:02,618 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:29:02,620 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:29:02,620 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:29:02,621 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:29:02,621 DEBUG [RS:0;7bf281cf3991:35385 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b6aff89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:29:02,632 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:35385 2024-11-13T11:29:02,632 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:29:02,632 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:29:02,632 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T11:29:02,633 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,44663,1731497342363 with port=35385, startcode=1731497342405 2024-11-13T11:29:02,633 DEBUG [RS:0;7bf281cf3991:35385 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:29:02,635 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38719, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:29:02,635 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44663 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,635 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44663 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,637 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270 2024-11-13T11:29:02,637 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38965 2024-11-13T11:29:02,637 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:29:02,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:29:02,639 DEBUG [RS:0;7bf281cf3991:35385 {}] zookeeper.ZKUtil(111): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,639 WARN [RS:0;7bf281cf3991:35385 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:29:02,639 INFO [RS:0;7bf281cf3991:35385 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:29:02,639 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,639 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,35385,1731497342405] 2024-11-13T11:29:02,643 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:29:02,644 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:29:02,645 INFO [RS:0;7bf281cf3991:35385 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:29:02,645 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:02,645 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:29:02,646 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:29:02,646 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:02,646 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:29:02,647 DEBUG [RS:0;7bf281cf3991:35385 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:29:02,648 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:02,648 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,648 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,648 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,648 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,648 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,35385,1731497342405-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:29:02,663 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:29:02,663 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,35385,1731497342405-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,664 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,664 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.Replication(171): 7bf281cf3991,35385,1731497342405 started 2024-11-13T11:29:02,679 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:02,679 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,35385,1731497342405, RpcServer on 7bf281cf3991/172.17.0.2:35385, sessionid=0x10038d811f80001 2024-11-13T11:29:02,679 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:29:02,679 DEBUG [RS:0;7bf281cf3991:35385 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,679 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,35385,1731497342405' 2024-11-13T11:29:02,679 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:29:02,680 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:29:02,681 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:29:02,681 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:29:02,681 DEBUG [RS:0;7bf281cf3991:35385 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,681 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,35385,1731497342405' 2024-11-13T11:29:02,681 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:29:02,681 DEBUG 
[RS:0;7bf281cf3991:35385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-13T11:29:02,682 DEBUG [RS:0;7bf281cf3991:35385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-13T11:29:02,682 INFO [RS:0;7bf281cf3991:35385 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-13T11:29:02,682 INFO [RS:0;7bf281cf3991:35385 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-13T11:29:02,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:29:02,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:29:02,736 WARN [7bf281cf3991:44663 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-13T11:29:02,784 INFO [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C35385%2C1731497342405, suffix=, logDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405, archiveDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/oldWALs, maxLogs=32 2024-11-13T11:29:02,784 INFO [RS:0;7bf281cf3991:35385 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35385%2C1731497342405.1731497342784 2024-11-13T11:29:02,790 INFO [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497342784 2024-11-13T11:29:02,791 DEBUG [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37589:37589),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-13T11:29:02,986 DEBUG [7bf281cf3991:44663 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:29:02,987 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,35385,1731497342405 2024-11-13T11:29:02,989 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,35385,1731497342405, state=OPENING 2024-11-13T11:29:02,991 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:29:02,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:02,994 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:29:02,994 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:02,994 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:02,994 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,35385,1731497342405}] 2024-11-13T11:29:03,149 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:29:03,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56577, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:29:03,159 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:29:03,159 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:29:03,161 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C35385%2C1731497342405.meta, suffix=.meta, logDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405, archiveDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/oldWALs, maxLogs=32 2024-11-13T11:29:03,161 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35385%2C1731497342405.meta.1731497343161.meta 2024-11-13T11:29:03,166 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.meta.1731497343161.meta 2024-11-13T11:29:03,168 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37589:37589),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-13T11:29:03,168 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:29:03,169 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:29:03,169 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:29:03,169 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T11:29:03,169 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:29:03,169 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:03,169 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:29:03,169 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:29:03,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:29:03,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:29:03,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:03,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:03,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:29:03,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:29:03,175 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:03,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:03,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:29:03,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:29:03,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:03,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:03,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:29:03,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:29:03,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:03,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
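Each store open above logs the effective CompactionConfiguration for its column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, throttle point 2684354560). If one wanted to set those numbers explicitly rather than rely on defaults, the usual keys are roughly as sketched below; key names are from memory and should be checked against the HBase version in use before relying on them:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionConfig() {
    Configuration conf = HBaseConfiguration.create();
    // "files [minFilesToCompact:3, maxFilesToCompact:10)" in the log
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // "ratio 1.200000; off-peak ratio 5.000000"
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // "throttle point 2684354560" (2.5 GB) separates small and large compactions;
    // the key name here is my best recollection and may differ by version.
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
    return conf;
  }
}
```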
2024-11-13T11:29:03,178 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:29:03,180 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740 2024-11-13T11:29:03,181 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740 2024-11-13T11:29:03,182 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:29:03,182 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:29:03,182 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:29:03,183 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:29:03,184 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754492, jitterRate=-0.04061497747898102}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:29:03,184 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:29:03,184 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497343169Writing region info on filesystem at 1731497343170 (+1 ms)Initializing all the Stores at 1731497343171 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497343171Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497343171Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497343171Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497343171Cleaning up temporary data from old regions at 1731497343182 (+11 ms)Running coprocessor post-open hooks at 1731497343184 (+2 ms)Region opened successfully at 1731497343184 2024-11-13T11:29:03,185 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497343149 2024-11-13T11:29:03,187 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:29:03,187 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:29:03,188 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,35385,1731497342405 2024-11-13T11:29:03,189 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,35385,1731497342405, state=OPEN 2024-11-13T11:29:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:29:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:29:03,191 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,35385,1731497342405 2024-11-13T11:29:03,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:03,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:03,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:29:03,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,35385,1731497342405 in 197 msec 2024-11-13T11:29:03,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:29:03,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-13T11:29:03,196 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:03,196 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:29:03,197 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:29:03,197 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,35385,1731497342405, seqNum=-1] 2024-11-13T11:29:03,198 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:29:03,199 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45843, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:29:03,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-13T11:29:03,205 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497343205, completionTime=-1 2024-11-13T11:29:03,205 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:29:03,205 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497403207 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497463207 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,44663,1731497342363-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,44663,1731497342363-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,44663,1731497342363-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:44663, period=300000, unit=MILLISECONDS is enabled. 
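At this point InitMetaProcedure has created the 'default' and 'hbase' namespaces and the master has finished waiting for region servers to report in. A client-side way to confirm that state, sketched under the assumption that a connection to this cluster's configuration is available, is simply to list the namespaces through the Admin API (standard HBase client calls, not something the test itself runs):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure completes, 'default' and 'hbase' should both be listed.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}
```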
2024-11-13T11:29:03,207 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:03,208 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:03,209 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:29:03,210 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.778sec 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,44663,1731497342363-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:29:03,211 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,44663,1731497342363-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:29:03,213 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:29:03,213 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:29:03,213 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,44663,1731497342363-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
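The ChoreService(168) lines above are emitted whenever a ScheduledChore is registered (BalancerChore, CatalogJanitor, HbckChore, and so on). As a rough illustration of how such a chore is wired, a minimal sketch follows; the ScheduledChore constructor arguments shown (name, stopper, period in milliseconds) reflect my reading of the API and may differ slightly between HBase versions:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // A trivial periodic task, registered the same way the master registers its chores.
  static class HeartbeatChore extends ScheduledChore {
    HeartbeatChore(Stoppable stopper) {
      super("HeartbeatChore", stopper, 60_000);
    }

    @Override
    protected void chore() {
      // Scheduling a chore produces the "Chore ScheduledChore name=... is enabled." log line.
      System.out.println("chore tick");
    }
  }

  public static void schedule(ChoreService service, Stoppable stopper) {
    service.scheduleChore(new HeartbeatChore(stopper));
  }
}
```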
2024-11-13T11:29:03,218 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27c0b6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:29:03,218 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,44663,-1 for getting cluster id 2024-11-13T11:29:03,219 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:29:03,221 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1ebf1fc4-8fc2-4af9-bf72-2663dd57e273' 2024-11-13T11:29:03,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:29:03,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1ebf1fc4-8fc2-4af9-bf72-2663dd57e273" 2024-11-13T11:29:03,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@227c1be3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:29:03,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,44663,-1] 2024-11-13T11:29:03,222 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:29:03,222 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:03,223 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40882, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:29:03,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11583d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:29:03,225 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:29:03,226 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,35385,1731497342405, seqNum=-1] 2024-11-13T11:29:03,227 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:29:03,228 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:29:03,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7bf281cf3991,44663,1731497342363 2024-11-13T11:29:03,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:03,234 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:29:03,235 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T11:29:03,236 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7bf281cf3991,44663,1731497342363 2024-11-13T11:29:03,236 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@682cbcf9 2024-11-13T11:29:03,236 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T11:29:03,238 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40886, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T11:29:03,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T11:29:03,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T11:29:03,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:29:03,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T11:29:03,242 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T11:29:03,242 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:03,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-13T11:29:03,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:29:03,243 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T11:29:03,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741835_1011 (size=405) 2024-11-13T11:29:03,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741835_1011 (size=405) 2024-11-13T11:29:03,252 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7b473607ce168f671b5bbbab0d1d756e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270 2024-11-13T11:29:03,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741836_1012 (size=88) 2024-11-13T11:29:03,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741836_1012 (size=88) 2024-11-13T11:29:03,259 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:03,259 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7b473607ce168f671b5bbbab0d1d756e, disabling compactions & flushes 2024-11-13T11:29:03,259 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:03,259 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:03,259 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. after waiting 0 ms 2024-11-13T11:29:03,259 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 
2024-11-13T11:29:03,259 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:03,259 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7b473607ce168f671b5bbbab0d1d756e: Waiting for close lock at 1731497343259Disabling compacts and flushes for region at 1731497343259Disabling writes for close at 1731497343259Writing region close event to WAL at 1731497343259Closed at 1731497343259 2024-11-13T11:29:03,261 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T11:29:03,261 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731497343261"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497343261"}]},"ts":"1731497343261"} 2024-11-13T11:29:03,263 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T11:29:03,264 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T11:29:03,264 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497343264"}]},"ts":"1731497343264"} 2024-11-13T11:29:03,266 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-13T11:29:03,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b473607ce168f671b5bbbab0d1d756e, ASSIGN}] 2024-11-13T11:29:03,267 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b473607ce168f671b5bbbab0d1d756e, ASSIGN 2024-11-13T11:29:03,268 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b473607ce168f671b5bbbab0d1d756e, ASSIGN; state=OFFLINE, location=7bf281cf3991,35385,1731497342405; forceNewPlan=false, retain=false 2024-11-13T11:29:03,419 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7b473607ce168f671b5bbbab0d1d756e, regionState=OPENING, regionLocation=7bf281cf3991,35385,1731497342405 2024-11-13T11:29:03,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b473607ce168f671b5bbbab0d1d756e, ASSIGN because future has completed 2024-11-13T11:29:03,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b473607ce168f671b5bbbab0d1d756e, server=7bf281cf3991,35385,1731497342405}] 2024-11-13T11:29:03,590 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:03,590 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7b473607ce168f671b5bbbab0d1d756e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:29:03,590 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,590 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:03,590 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,590 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,592 INFO [StoreOpener-7b473607ce168f671b5bbbab0d1d756e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,593 INFO [StoreOpener-7b473607ce168f671b5bbbab0d1d756e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b473607ce168f671b5bbbab0d1d756e columnFamilyName info 2024-11-13T11:29:03,594 DEBUG [StoreOpener-7b473607ce168f671b5bbbab0d1d756e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:03,594 INFO [StoreOpener-7b473607ce168f671b5bbbab0d1d756e-1 {}] regionserver.HStore(327): Store=7b473607ce168f671b5bbbab0d1d756e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:03,594 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,595 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,595 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,596 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,596 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,598 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,599 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:29:03,600 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7b473607ce168f671b5bbbab0d1d756e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819667, jitterRate=0.04226130247116089}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:29:03,600 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:03,601 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7b473607ce168f671b5bbbab0d1d756e: Running coprocessor pre-open hook at 1731497343590Writing region info on filesystem at 1731497343590Initializing all the Stores at 1731497343591 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497343592 (+1 ms)Cleaning up temporary data from old regions at 1731497343596 (+4 ms)Running coprocessor post-open hooks at 1731497343600 (+4 ms)Region opened successfully at 1731497343600 2024-11-13T11:29:03,601 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e., pid=6, masterSystemTime=1731497343583 2024-11-13T11:29:03,604 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:03,604 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:03,605 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7b473607ce168f671b5bbbab0d1d756e, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,35385,1731497342405 2024-11-13T11:29:03,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b473607ce168f671b5bbbab0d1d756e, server=7bf281cf3991,35385,1731497342405 because future has completed 2024-11-13T11:29:03,607 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44663 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=7bf281cf3991,35385,1731497342405, table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b473607ce168f671b5bbbab0d1d756e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
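The sequence above is the server-side CreateTableProcedure for 'TestLogRolling-testCompactionRecordDoesntBlockRolling' (pre-operation, FS layout, add-to-meta, region assignment, region open). The client-side request that drives it is an ordinary Admin.createTable call; a self-contained sketch of an equivalent request, with the same table name and a single 'info' family with VERSIONS => '1' as shown in the create request, might look like this (illustrative only, not the test's own code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Single 'info' column family keeping one version, matching the descriptor in the log.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)
                      .build())
              .build());
      System.out.println("table exists: " + admin.tableExists(name));
    }
  }
}
```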
2024-11-13T11:29:03,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T11:29:03,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7b473607ce168f671b5bbbab0d1d756e, server=7bf281cf3991,35385,1731497342405 in 181 msec 2024-11-13T11:29:03,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T11:29:03,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b473607ce168f671b5bbbab0d1d756e, ASSIGN in 345 msec 2024-11-13T11:29:03,614 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T11:29:03,615 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497343615"}]},"ts":"1731497343615"} 2024-11-13T11:29:03,616 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-13T11:29:03,617 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T11:29:03,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 378 msec 2024-11-13T11:29:03,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:03,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:03,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:29:03,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T11:29:03,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:29:03,969 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T11:29:03,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T11:29:03,969 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T11:29:04,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:04,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:05,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-13T11:29:05,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-13T11:29:06,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same InvocationTargetException, caused by java.io.IOException: Filesystem closed, stack trace identical to the one at 11:29:05,718)
2024-11-13T11:29:06,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:07,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:07,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
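The warning above recurs roughly once per second because the Close-WAL-Writer thread keeps retrying lease recovery on the old WAL files, and each retry probes DistributedFileSystem.isFileClosed through reflection after the test's HDFS client has already been shut down, so the probe fails with an InvocationTargetException whose cause is "Filesystem closed". The sketch below shows that reflective-probe pattern in isolation; only the class and method names come from the stack trace, the surrounding code is illustrative and is not HBase's actual RecoverLeaseFSUtils implementation.

// Minimal sketch of a reflective isFileClosed probe (illustrative only).
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  // Returns true if HDFS reports the file as closed, false if the probe could not
  // be performed (for example, the method is missing or the filesystem is closed).
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on every
      // FileSystem implementation, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // filesystem does not support the probe
    } catch (InvocationTargetException e) {
      // The case seen in the log: the real failure ("Filesystem closed") is the
      // cause wrapped inside the reflection exception.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        System.err.println("isFileClosed failed: " + cause.getMessage());
      }
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    // Expects one argument: the path to check, e.g. a WAL file under /user/... .
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(isFileClosed(fs, new Path(args[0])));
  }
}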
2024-11-13T11:29:08,671 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-13T11:29:08,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
(the same FsDatasetImpl warning was logged 15 more times by HBase-Metrics2-1: 3x at 11:29:08,673, 2x at 08,674, 3x at 08,696, 3x at 08,697, 3x at 08,701, and once at 08,704)
2024-11-13T11:29:08,709 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-13T11:29:08,710 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-13T11:29:08,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same InvocationTargetException / "Filesystem closed" stack trace as at 11:29:05,718)
2024-11-13T11:29:08,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:09,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:09,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:10,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:10,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:11,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:11,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:12,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:12,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:13,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-13T11:29:13,311 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T11:29:13,311 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-13T11:29:13,313 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:13,313 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
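The entries above show the test confirming that the CREATE procedure finished and that the new table has a single region. For reference, a minimal client-side sketch that performs the same kind of check through the public Admin API is shown below; it is not the test's own code, and the configuration is assumed to point at the mini-cluster used in this run.

// Illustrative sketch: list the regions of the table named in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class ListTableRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(table)) {
        System.out.println("table not found");
        return;
      }
      // Each RegionInfo corresponds to one row range; the log above reports a
      // single region for this table.
      for (RegionInfo region : admin.getRegions(table)) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}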
2024-11-13T11:29:13,316 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e., hostname=7bf281cf3991,35385,1731497342405, seqNum=2]
2024-11-13T11:29:13,323 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:13,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:13,330 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T11:29:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-13T11:29:13,332 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T11:29:13,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T11:29:13,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35385 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-13T11:29:13,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
2024-11-13T11:29:13,496 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 7b473607ce168f671b5bbbab0d1d756e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T11:29:13,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/4e7ecdc527df4494892d51076614af69 is 1080, key is row0001/info:/1731497353317/Put/seqid=0
2024-11-13T11:29:13,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741837_1013 (size=6033)
2024-11-13T11:29:13,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741837_1013 (size=6033)
2024-11-13T11:29:13,522 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/4e7ecdc527df4494892d51076614af69
2024-11-13T11:29:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/4e7ecdc527df4494892d51076614af69 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4e7ecdc527df4494892d51076614af69
2024-11-13T11:29:13,533 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4e7ecdc527df4494892d51076614af69, entries=1, sequenceid=5, filesize=5.9 K
2024-11-13T11:29:13,534 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b473607ce168f671b5bbbab0d1d756e in 38ms, sequenceid=5, compaction requested=false
2024-11-13T11:29:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 7b473607ce168f671b5bbbab0d1d756e:
2024-11-13T11:29:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
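The flush captured above is driven from the master: a client asks for a table flush, the master stores a FlushTableProcedure (pid=7), and that procedure fans out a FlushRegionProcedure (pid=8) to the region server hosting 7b473607ce168f671b5bbbab0d1d756e. A minimal sketch of how a client requests such a flush through the Admin API follows; it is illustrative only and not taken from the test, and the configuration is assumed to point at the same cluster.

// Illustrative sketch: request a table flush, which the master turns into the
// FlushTableProcedure / FlushRegionProcedure sequence seen in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; each region's
      // memstore is written out as a new store file, as recorded above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}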
2024-11-13T11:29:13,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-13T11:29:13,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-13T11:29:13,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-13T11:29:13,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec
2024-11-13T11:29:13,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec
2024-11-13T11:29:13,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same InvocationTargetException / "Filesystem closed" stack trace as at 11:29:05,718)
2024-11-13T11:29:13,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:14,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:14,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:15,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:15,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:16,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:16,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:17,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:17,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta (same stack trace)
2024-11-13T11:29:18,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 (same stack trace)
2024-11-13T11:29:18,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:19,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:19,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:20,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:20,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:21,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:21,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:22,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:22,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T11:29:23,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-13T11:29:23,343 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T11:29:23,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:23,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:23,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-13T11:29:23,354 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T11:29:23,356 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T11:29:23,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T11:29:23,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35385 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-13T11:29:23,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
2024-11-13T11:29:23,514 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 7b473607ce168f671b5bbbab0d1d756e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T11:29:23,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/8ceaa04fe6f245cfa4077ad9415ce8d2 is 1080, key is row0002/info:/1731497363346/Put/seqid=0
2024-11-13T11:29:23,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741838_1014 (size=6033)
2024-11-13T11:29:23,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741838_1014 (size=6033)
2024-11-13T11:29:23,530 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/8ceaa04fe6f245cfa4077ad9415ce8d2
2024-11-13T11:29:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/8ceaa04fe6f245cfa4077ad9415ce8d2 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/8ceaa04fe6f245cfa4077ad9415ce8d2
2024-11-13T11:29:23,543 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/8ceaa04fe6f245cfa4077ad9415ce8d2, entries=1, sequenceid=9, filesize=5.9 K
2024-11-13T11:29:23,544 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b473607ce168f671b5bbbab0d1d756e in 30ms, sequenceid=9, compaction requested=false
2024-11-13T11:29:23,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 7b473607ce168f671b5bbbab0d1d756e:
2024-11-13T11:29:23,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
2024-11-13T11:29:23,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-13T11:29:23,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-13T11:29:23,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-13T11:29:23,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec
2024-11-13T11:29:23,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec
2024-11-13T11:29:23,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-13T11:29:23,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:24,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:24,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:25,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:25,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:26,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:26,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:27,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:27,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:28,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:28,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:29,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:29,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:29,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta after 68060ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T11:29:29,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 after 68070ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T11:29:30,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:30,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:31,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:31,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:32,343 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T11:29:32,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:32,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:33,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-13T11:29:33,381 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T11:29:33,385 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35385%2C1731497342405.1731497373385 2024-11-13T11:29:33,390 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:33,390 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:33,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:33,391 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:33,391 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:33,391 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497342784 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497373385 2024-11-13T11:29:33,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741833_1009 (size=5546) 2024-11-13T11:29:33,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741833_1009 (size=5546) 2024-11-13T11:29:33,401 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46607:46607),(127.0.0.1/127.0.0.1:37589:37589)] 2024-11-13T11:29:33,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T11:29:33,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T11:29:33,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-13T11:29:33,404 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T11:29:33,405 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T11:29:33,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T11:29:33,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35385 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-13T11:29:33,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:33,559 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 7b473607ce168f671b5bbbab0d1d756e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T11:29:33,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/7cbbe38572dc47f99aefa85fec1db541 is 1080, key is row0003/info:/1731497373383/Put/seqid=0 2024-11-13T11:29:33,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741840_1016 (size=6033) 2024-11-13T11:29:33,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741840_1016 (size=6033) 2024-11-13T11:29:33,572 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/7cbbe38572dc47f99aefa85fec1db541 2024-11-13T11:29:33,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/7cbbe38572dc47f99aefa85fec1db541 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/7cbbe38572dc47f99aefa85fec1db541 2024-11-13T11:29:33,584 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/7cbbe38572dc47f99aefa85fec1db541, entries=1, sequenceid=13, filesize=5.9 K 2024-11-13T11:29:33,585 INFO 
[RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b473607ce168f671b5bbbab0d1d756e in 26ms, sequenceid=13, compaction requested=true 2024-11-13T11:29:33,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 7b473607ce168f671b5bbbab0d1d756e: 2024-11-13T11:29:33,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:33,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-13T11:29:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-13T11:29:33,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-13T11:29:33,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-13T11:29:33,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec 2024-11-13T11:29:33,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:33,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:34,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:34,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:35,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:35,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:36,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:36,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:37,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:37,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:38,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:38,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:39,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:39,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:40,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:40,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:41,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:41,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:42,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:42,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T11:29:43,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-13T11:29:43,452 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T11:29:43,453 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-13T11:29:43,457 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-13T11:29:43,457 DEBUG [Time-limited test {}] regionserver.HStore(1541): 7b473607ce168f671b5bbbab0d1d756e/info is initiating minor compaction (all files)
2024-11-13T11:29:43,457 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-13T11:29:43,457 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T11:29:43,458 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 7b473607ce168f671b5bbbab0d1d756e/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
2024-11-13T11:29:43,458 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4e7ecdc527df4494892d51076614af69, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/8ceaa04fe6f245cfa4077ad9415ce8d2, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/7cbbe38572dc47f99aefa85fec1db541] into tmpdir=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp, totalSize=17.7 K
2024-11-13T11:29:43,459 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4e7ecdc527df4494892d51076614af69, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731497353317
2024-11-13T11:29:43,461 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8ceaa04fe6f245cfa4077ad9415ce8d2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731497363346
2024-11-13T11:29:43,462 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7cbbe38572dc47f99aefa85fec1db541, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731497373383
2024-11-13T11:29:43,476 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 7b473607ce168f671b5bbbab0d1d756e#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-13T11:29:43,477 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/4889b26055c34e58bca65e0c895046cb is 1080, key is row0001/info:/1731497353317/Put/seqid=0
2024-11-13T11:29:43,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741841_1017 (size=8296)
2024-11-13T11:29:43,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741841_1017 (size=8296)
2024-11-13T11:29:43,491 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/4889b26055c34e58bca65e0c895046cb as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4889b26055c34e58bca65e0c895046cb
2024-11-13T11:29:43,499 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7b473607ce168f671b5bbbab0d1d756e/info of 7b473607ce168f671b5bbbab0d1d756e into 4889b26055c34e58bca65e0c895046cb(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T11:29:43,499 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 7b473607ce168f671b5bbbab0d1d756e:
2024-11-13T11:29:43,501 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35385%2C1731497342405.1731497383501
2024-11-13T11:29:43,507 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T11:29:43,507 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T11:29:43,508 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T11:29:43,508 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T11:29:43,508 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T11:29:43,508 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497373385 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497383501
2024-11-13T11:29:43,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741839_1015 (size=2520)
2024-11-13T11:29:43,509 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46607:46607),(127.0.0.1/127.0.0.1:37589:37589)]
2024-11-13T11:29:43,510 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497373385 is not closed yet, will try archiving it next time
2024-11-13T11:29:43,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741839_1015 (size=2520)
2024-11-13T11:29:43,510 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497342784 to hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/oldWALs/7bf281cf3991%2C35385%2C1731497342405.1731497342784
2024-11-13T11:29:43,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T11:29:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-13T11:29:43,513 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T11:29:43,514 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T11:29:43,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T11:29:43,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35385 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-13T11:29:43,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
2024-11-13T11:29:43,669 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 7b473607ce168f671b5bbbab0d1d756e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T11:29:43,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/20a647f2842d425fa98d990f1f736ce7 is 1080, key is row0000/info:/1731497383500/Put/seqid=0
2024-11-13T11:29:43,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741843_1019 (size=6033)
2024-11-13T11:29:43,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741843_1019 (size=6033)
2024-11-13T11:29:43,684 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/20a647f2842d425fa98d990f1f736ce7
2024-11-13T11:29:43,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/20a647f2842d425fa98d990f1f736ce7 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/20a647f2842d425fa98d990f1f736ce7
2024-11-13T11:29:43,696 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/20a647f2842d425fa98d990f1f736ce7, entries=1, sequenceid=18, filesize=5.9 K
2024-11-13T11:29:43,697 INFO [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b473607ce168f671b5bbbab0d1d756e in 29ms, sequenceid=18, compaction requested=false
2024-11-13T11:29:43,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 7b473607ce168f671b5bbbab0d1d756e:
2024-11-13T11:29:43,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.
2024-11-13T11:29:43,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-13T11:29:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-13T11:29:43,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-13T11:29:43,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-13T11:29:43,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-11-13T11:29:43,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:43,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:43,909 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T11:29:43,909 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T11:29:44,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:44,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:45,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:45,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:46,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:46,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:47,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:47,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:48,591 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7b473607ce168f671b5bbbab0d1d756e, had cached 0 bytes from a total of 14329 2024-11-13T11:29:48,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:48,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:49,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:49,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:50,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:50,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:51,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:51,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:52,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:52,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-13T11:29:53,572 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T11:29:53,580 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C35385%2C1731497342405.1731497393579 2024-11-13T11:29:53,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,591 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,591 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497383501 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497393579 2024-11-13T11:29:53,592 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37589:37589),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-13T11:29:53,592 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497383501 is not closed yet, will try archiving it next time 2024-11-13T11:29:53,592 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/WALs/7bf281cf3991,35385,1731497342405/7bf281cf3991%2C35385%2C1731497342405.1731497373385 to hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/oldWALs/7bf281cf3991%2C35385%2C1731497342405.1731497373385 2024-11-13T11:29:53,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T11:29:53,592 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T11:29:53,593 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:29:53,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:53,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741842_1018 (size=2026) 2024-11-13T11:29:53,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:53,593 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T11:29:53,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741842_1018 (size=2026) 2024-11-13T11:29:53,594 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T11:29:53,594 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1156108172, stopped=false 2024-11-13T11:29:53,594 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,44663,1731497342363 2024-11-13T11:29:53,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:29:53,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:29:53,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:53,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:53,595 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:29:53,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:29:53,596 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T11:29:53,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:29:53,596 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:29:53,597 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:53,597 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,35385,1731497342405' ***** 2024-11-13T11:29:53,597 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T11:29:53,597 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T11:29:53,597 INFO [RS:0;7bf281cf3991:35385 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T11:29:53,597 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T11:29:53,597 INFO [RS:0;7bf281cf3991:35385 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(3091): Received CLOSE for 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,35385,1731497342405 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:35385. 2024-11-13T11:29:53,598 DEBUG [RS:0;7bf281cf3991:35385 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:29:53,598 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7b473607ce168f671b5bbbab0d1d756e, disabling compactions & flushes 2024-11-13T11:29:53,598 DEBUG [RS:0;7bf281cf3991:35385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:53,598 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 
2024-11-13T11:29:53,598 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:53,598 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. after waiting 0 ms 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:29:53,598 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T11:29:53,598 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7b473607ce168f671b5bbbab0d1d756e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T11:29:53,598 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:29:53,599 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T11:29:53,599 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7b473607ce168f671b5bbbab0d1d756e=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.} 2024-11-13T11:29:53,599 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:29:53,599 DEBUG [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7b473607ce168f671b5bbbab0d1d756e 2024-11-13T11:29:53,599 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:29:53,599 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:29:53,599 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:29:53,599 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:29:53,599 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-13T11:29:53,603 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/ac8b2d52e57c4a1980758eb2c8f7859c is 1080, key is row0001/info:/1731497393575/Put/seqid=0 2024-11-13T11:29:53,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741845_1021 (size=6033) 2024-11-13T11:29:53,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741845_1021 (size=6033) 2024-11-13T11:29:53,610 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/ac8b2d52e57c4a1980758eb2c8f7859c 2024-11-13T11:29:53,616 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/.tmp/info/ac8b2d52e57c4a1980758eb2c8f7859c as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/ac8b2d52e57c4a1980758eb2c8f7859c 2024-11-13T11:29:53,620 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/info/ef3f670477a54316944f7917554e586c is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e./info:regioninfo/1731497343604/Put/seqid=0 2024-11-13T11:29:53,622 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/ac8b2d52e57c4a1980758eb2c8f7859c, entries=1, sequenceid=22, filesize=5.9 K 2024-11-13T11:29:53,624 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b473607ce168f671b5bbbab0d1d756e in 26ms, sequenceid=22, compaction requested=true 2024-11-13T11:29:53,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741846_1022 (size=7308) 2024-11-13T11:29:53,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741846_1022 (size=7308) 2024-11-13T11:29:53,627 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/info/ef3f670477a54316944f7917554e586c 2024-11-13T11:29:53,633 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4e7ecdc527df4494892d51076614af69, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/8ceaa04fe6f245cfa4077ad9415ce8d2, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/7cbbe38572dc47f99aefa85fec1db541] to archive 2024-11-13T11:29:53,634 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T11:29:53,636 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4e7ecdc527df4494892d51076614af69 to hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/4e7ecdc527df4494892d51076614af69 2024-11-13T11:29:53,637 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/8ceaa04fe6f245cfa4077ad9415ce8d2 to hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/8ceaa04fe6f245cfa4077ad9415ce8d2 2024-11-13T11:29:53,638 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/7cbbe38572dc47f99aefa85fec1db541 to hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/info/7cbbe38572dc47f99aefa85fec1db541 2024-11-13T11:29:53,639 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7bf281cf3991:44663 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-13T11:29:53,639 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4e7ecdc527df4494892d51076614af69=6033, 8ceaa04fe6f245cfa4077ad9415ce8d2=6033, 7cbbe38572dc47f99aefa85fec1db541=6033] 2024-11-13T11:29:53,643 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b473607ce168f671b5bbbab0d1d756e/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-13T11:29:53,644 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 2024-11-13T11:29:53,644 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7b473607ce168f671b5bbbab0d1d756e: Waiting for close lock at 1731497393598Running coprocessor pre-close hooks at 1731497393598Disabling compacts and flushes for region at 1731497393598Disabling writes for close at 1731497393598Obtaining lock to block concurrent updates at 1731497393598Preparing flush snapshotting stores in 7b473607ce168f671b5bbbab0d1d756e at 1731497393598Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731497393599 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. at 1731497393599Flushing 7b473607ce168f671b5bbbab0d1d756e/info: creating writer at 1731497393600 (+1 ms)Flushing 7b473607ce168f671b5bbbab0d1d756e/info: appending metadata at 1731497393602 (+2 ms)Flushing 7b473607ce168f671b5bbbab0d1d756e/info: closing flushed file at 1731497393602Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28b2d523: reopening flushed file at 1731497393616 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b473607ce168f671b5bbbab0d1d756e in 26ms, sequenceid=22, compaction requested=true at 1731497393624 (+8 ms)Writing region close event to WAL at 1731497393640 (+16 ms)Running coprocessor post-close hooks at 1731497393644 (+4 ms)Closed at 1731497393644 2024-11-13T11:29:53,644 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731497343238.7b473607ce168f671b5bbbab0d1d756e. 
2024-11-13T11:29:53,650 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/ns/950f601fbfe04a7aa3a9342b58f8ebb9 is 43, key is default/ns:d/1731497343200/Put/seqid=0 2024-11-13T11:29:53,653 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T11:29:53,653 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T11:29:53,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741847_1023 (size=5153) 2024-11-13T11:29:53,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741847_1023 (size=5153) 2024-11-13T11:29:53,656 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/ns/950f601fbfe04a7aa3a9342b58f8ebb9 2024-11-13T11:29:53,674 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/table/0e0aebb6251047e7bd045420e0d469a8 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731497343615/Put/seqid=0 2024-11-13T11:29:53,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741848_1024 (size=5508) 2024-11-13T11:29:53,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741848_1024 (size=5508) 2024-11-13T11:29:53,679 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/table/0e0aebb6251047e7bd045420e0d469a8 2024-11-13T11:29:53,685 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/info/ef3f670477a54316944f7917554e586c as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/info/ef3f670477a54316944f7917554e586c 2024-11-13T11:29:53,690 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/info/ef3f670477a54316944f7917554e586c, entries=10, sequenceid=11, filesize=7.1 K 2024-11-13T11:29:53,691 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/ns/950f601fbfe04a7aa3a9342b58f8ebb9 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/ns/950f601fbfe04a7aa3a9342b58f8ebb9 2024-11-13T11:29:53,696 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/ns/950f601fbfe04a7aa3a9342b58f8ebb9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T11:29:53,697 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/.tmp/table/0e0aebb6251047e7bd045420e0d469a8 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/table/0e0aebb6251047e7bd045420e0d469a8 2024-11-13T11:29:53,702 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/table/0e0aebb6251047e7bd045420e0d469a8, entries=2, sequenceid=11, filesize=5.4 K 2024-11-13T11:29:53,703 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-13T11:29:53,707 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T11:29:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:29:53,708 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:29:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497393599Running coprocessor pre-close hooks at 1731497393599Disabling compacts and flushes for region at 1731497393599Disabling writes for close at 1731497393599Obtaining lock to block concurrent updates at 1731497393599Preparing flush snapshotting stores in 1588230740 at 1731497393599Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731497393599Flushing stores of hbase:meta,,1.1588230740 at 1731497393600 (+1 ms)Flushing 1588230740/info: creating writer at 1731497393600Flushing 1588230740/info: appending metadata at 1731497393620 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731497393620Flushing 1588230740/ns: creating writer at 1731497393636 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731497393649 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731497393649Flushing 
1588230740/table: creating writer at 1731497393661 (+12 ms)Flushing 1588230740/table: appending metadata at 1731497393673 (+12 ms)Flushing 1588230740/table: closing flushed file at 1731497393673Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47b2d249: reopening flushed file at 1731497393684 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6de5146d: reopening flushed file at 1731497393690 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f66b3e2: reopening flushed file at 1731497393696 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false at 1731497393703 (+7 ms)Writing region close event to WAL at 1731497393704 (+1 ms)Running coprocessor post-close hooks at 1731497393708 (+4 ms)Closed at 1731497393708
2024-11-13T11:29:53,708 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-13T11:29:53,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:29:53,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:29:53,799 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,35385,1731497342405; all regions closed.
2024-11-13T11:29:53,800 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,800 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,800 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,800 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,800 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741834_1010 (size=3306) 2024-11-13T11:29:53,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741834_1010 (size=3306) 2024-11-13T11:29:53,809 DEBUG [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/oldWALs 2024-11-13T11:29:53,809 INFO [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C35385%2C1731497342405.meta:.meta(num 1731497343161) 2024-11-13T11:29:53,810 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,810 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,810 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,810 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,810 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741844_1020 (size=1252) 2024-11-13T11:29:53,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741844_1020 (size=1252) 2024-11-13T11:29:53,818 DEBUG [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/oldWALs 2024-11-13T11:29:53,818 INFO [RS:0;7bf281cf3991:35385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C35385%2C1731497342405:(num 1731497393579) 2024-11-13T11:29:53,818 DEBUG [RS:0;7bf281cf3991:35385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:53,818 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:29:53,818 INFO [RS:0;7bf281cf3991:35385 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:29:53,818 INFO [RS:0;7bf281cf3991:35385 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T11:29:53,818 INFO [RS:0;7bf281cf3991:35385 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:29:53,818 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T11:29:53,818 INFO [RS:0;7bf281cf3991:35385 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35385
2024-11-13T11:29:53,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-13T11:29:53,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,35385,1731497342405
2024-11-13T11:29:53,820 INFO [RS:0;7bf281cf3991:35385 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-13T11:29:53,820 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f6c20900aa0@3d1b1a14 rejected from java.util.concurrent.ThreadPoolExecutor@3924996f[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-11-13T11:29:53,820 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,35385,1731497342405]
2024-11-13T11:29:53,821 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,35385,1731497342405 already deleted, retry=false
2024-11-13T11:29:53,821 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,35385,1731497342405 expired; onlineServers=0
2024-11-13T11:29:53,821 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,44663,1731497342363' *****
2024-11-13T11:29:53,821 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-13T11:29:53,821 INFO [M:0;7bf281cf3991:44663 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-13T11:29:53,821 INFO [M:0;7bf281cf3991:44663 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-13T11:29:53,821 DEBUG [M:0;7bf281cf3991:44663 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-13T11:29:53,821 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-13T11:29:53,821 DEBUG [M:0;7bf281cf3991:44663 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:29:53,821 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497342547 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497342547,5,FailOnTimeoutGroup] 2024-11-13T11:29:53,821 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497342547 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497342547,5,FailOnTimeoutGroup] 2024-11-13T11:29:53,822 INFO [M:0;7bf281cf3991:44663 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:29:53,822 INFO [M:0;7bf281cf3991:44663 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:29:53,822 DEBUG [M:0;7bf281cf3991:44663 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:29:53,822 INFO [M:0;7bf281cf3991:44663 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:29:53,822 INFO [M:0;7bf281cf3991:44663 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:29:53,822 INFO [M:0;7bf281cf3991:44663 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:29:53,822 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T11:29:53,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:29:53,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:53,823 DEBUG [M:0;7bf281cf3991:44663 {}] zookeeper.ZKUtil(347): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T11:29:53,823 WARN [M:0;7bf281cf3991:44663 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T11:29:53,823 INFO [M:0;7bf281cf3991:44663 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/.lastflushedseqids 2024-11-13T11:29:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741849_1025 (size=130) 2024-11-13T11:29:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741849_1025 (size=130) 2024-11-13T11:29:53,831 INFO [M:0;7bf281cf3991:44663 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:29:53,831 INFO [M:0;7bf281cf3991:44663 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:29:53,831 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:29:53,831 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:53,831 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:53,831 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:29:53,831 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:53,831 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-13T11:29:53,846 DEBUG [M:0;7bf281cf3991:44663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b28288e9a1084beb9c1dcefc0bc3e167 is 82, key is hbase:meta,,1/info:regioninfo/1731497343188/Put/seqid=0 2024-11-13T11:29:53,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741850_1026 (size=5672) 2024-11-13T11:29:53,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741850_1026 (size=5672) 2024-11-13T11:29:53,851 INFO [M:0;7bf281cf3991:44663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b28288e9a1084beb9c1dcefc0bc3e167 2024-11-13T11:29:53,869 DEBUG [M:0;7bf281cf3991:44663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdea5f546e7c4eb5ac29d7a58ec6efbd is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731497343618/Put/seqid=0 2024-11-13T11:29:53,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741851_1027 (size=7824) 2024-11-13T11:29:53,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741851_1027 (size=7824) 2024-11-13T11:29:53,875 INFO [M:0;7bf281cf3991:44663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdea5f546e7c4eb5ac29d7a58ec6efbd 2024-11-13T11:29:53,879 INFO [M:0;7bf281cf3991:44663 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fdea5f546e7c4eb5ac29d7a58ec6efbd 2024-11-13T11:29:53,893 DEBUG [M:0;7bf281cf3991:44663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8c29321405554243a49f0871851ef7d0 is 69, key is 7bf281cf3991,35385,1731497342405/rs:state/1731497342636/Put/seqid=0 2024-11-13T11:29:53,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741852_1028 (size=5156) 2024-11-13T11:29:53,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741852_1028 (size=5156) 2024-11-13T11:29:53,898 INFO [M:0;7bf281cf3991:44663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8c29321405554243a49f0871851ef7d0 2024-11-13T11:29:53,915 DEBUG [M:0;7bf281cf3991:44663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b33f0202fe4840fa85030468861cde91 is 52, key is load_balancer_on/state:d/1731497343233/Put/seqid=0 2024-11-13T11:29:53,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741853_1029 (size=5056) 2024-11-13T11:29:53,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741853_1029 (size=5056) 2024-11-13T11:29:53,920 INFO [M:0;7bf281cf3991:44663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b33f0202fe4840fa85030468861cde91 2024-11-13T11:29:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:29:53,921 INFO [RS:0;7bf281cf3991:35385 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:29:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35385-0x10038d811f80001, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:29:53,921 INFO [RS:0;7bf281cf3991:35385 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,35385,1731497342405; zookeeper connection closed. 
2024-11-13T11:29:53,921 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b2a83e4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b2a83e4 2024-11-13T11:29:53,921 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T11:29:53,926 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b28288e9a1084beb9c1dcefc0bc3e167 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b28288e9a1084beb9c1dcefc0bc3e167 2024-11-13T11:29:53,931 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b28288e9a1084beb9c1dcefc0bc3e167, entries=8, sequenceid=121, filesize=5.5 K 2024-11-13T11:29:53,932 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdea5f546e7c4eb5ac29d7a58ec6efbd as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fdea5f546e7c4eb5ac29d7a58ec6efbd 2024-11-13T11:29:53,936 INFO [M:0;7bf281cf3991:44663 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fdea5f546e7c4eb5ac29d7a58ec6efbd 2024-11-13T11:29:53,936 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fdea5f546e7c4eb5ac29d7a58ec6efbd, entries=14, sequenceid=121, filesize=7.6 K 2024-11-13T11:29:53,937 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8c29321405554243a49f0871851ef7d0 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8c29321405554243a49f0871851ef7d0 2024-11-13T11:29:53,942 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8c29321405554243a49f0871851ef7d0, entries=1, sequenceid=121, filesize=5.0 K 2024-11-13T11:29:53,943 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b33f0202fe4840fa85030468861cde91 as hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b33f0202fe4840fa85030468861cde91 2024-11-13T11:29:53,948 INFO [M:0;7bf281cf3991:44663 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38965/user/jenkins/test-data/22b40213-298b-c1cd-e7e3-9f567dc8c270/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b33f0202fe4840fa85030468861cde91, entries=1, sequenceid=121, filesize=4.9 K 2024-11-13T11:29:53,949 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=121, compaction requested=false 2024-11-13T11:29:53,951 INFO [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:53,951 DEBUG [M:0;7bf281cf3991:44663 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497393831Disabling compacts and flushes for region at 1731497393831Disabling writes for close at 1731497393831Obtaining lock to block concurrent updates at 1731497393831Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497393831Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731497393832 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731497393832Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497393832Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497393845 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497393845Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497393855 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497393869 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497393869Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497393879 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497393893 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497393893Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497393902 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497393915 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497393915Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ab43e42: reopening flushed file at 1731497393925 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7140c33f: reopening flushed file at 1731497393931 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f49e9dc: reopening flushed file at 1731497393937 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4add007b: reopening flushed file at 1731497393942 (+5 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=121, compaction requested=false at 1731497393949 (+7 ms)Writing region close event to WAL at 1731497393951 (+2 ms)Closed at 1731497393951 2024-11-13T11:29:53,951 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,952 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,952 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,952 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:29:53,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36997 is added to blk_1073741830_1006 (size=53047) 2024-11-13T11:29:53,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34305 is added to blk_1073741830_1006 (size=53047) 2024-11-13T11:29:53,955 INFO [M:0;7bf281cf3991:44663 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T11:29:53,955 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:29:53,955 INFO [M:0;7bf281cf3991:44663 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44663 2024-11-13T11:29:53,955 INFO [M:0;7bf281cf3991:44663 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:29:53,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:29:53,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:29:53,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T11:29:53,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T11:29:54,057 INFO [M:0;7bf281cf3991:44663 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:29:54,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:29:54,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44663-0x10038d811f80000, quorum=127.0.0.1:56388, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:29:54,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25901f18{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:54,063 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37f050e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:29:54,063 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:29:54,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc4d1f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:29:54,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5090366a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir/,STOPPED} 
2024-11-13T11:29:54,067 WARN [BP-605604222-172.17.0.2-1731497341796 heartbeating to localhost/127.0.0.1:38965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:29:54,067 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T11:29:54,067 WARN [BP-605604222-172.17.0.2-1731497341796 heartbeating to localhost/127.0.0.1:38965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-605604222-172.17.0.2-1731497341796 (Datanode Uuid 9af81504-50a9-49e7-8c91-fb409e0eac2b) service to localhost/127.0.0.1:38965 2024-11-13T11:29:54,067 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:29:54,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data3/current/BP-605604222-172.17.0.2-1731497341796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:54,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data4/current/BP-605604222-172.17.0.2-1731497341796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:54,068 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:29:54,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@403776ad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:54,069 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43fad179{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:29:54,069 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:29:54,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f373d12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:29:54,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73038ac2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir/,STOPPED} 2024-11-13T11:29:54,071 WARN [BP-605604222-172.17.0.2-1731497341796 heartbeating to localhost/127.0.0.1:38965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:29:54,071 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:29:54,071 WARN [BP-605604222-172.17.0.2-1731497341796 heartbeating to localhost/127.0.0.1:38965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-605604222-172.17.0.2-1731497341796 (Datanode Uuid 6663ed8d-0bcf-43d3-bd0f-9ca84b10c6a6) service to localhost/127.0.0.1:38965 2024-11-13T11:29:54,071 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:29:54,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data1/current/BP-605604222-172.17.0.2-1731497341796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:54,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/cluster_67085c6e-5d8c-71db-9034-013da80d2922/data/data2/current/BP-605604222-172.17.0.2-1731497341796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:29:54,072 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:29:54,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@593778be{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:29:54,078 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@391ebee0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:29:54,078 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:29:54,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b6c619b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:29:54,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18e0f0d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir/,STOPPED} 2024-11-13T11:29:54,084 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:29:54,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T11:29:54,110 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: regionserver/7bf281cf3991:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:38965 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:38965 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:38965 from 
jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 113), ProcessCount=11 (was 11), AvailableMemoryMB=2074 (was 2193) 2024-11-13T11:29:54,117 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=61, ProcessCount=11, AvailableMemoryMB=2074 2024-11-13T11:29:54,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:29:54,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.log.dir so I do NOT create it in target/test-data/b35b8de0-4b01-1900-49de-655f132af489 2024-11-13T11:29:54,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0ea6061-1989-a20e-4f7d-9ba5d851f6fe/hadoop.tmp.dir so I do NOT create it in target/test-data/b35b8de0-4b01-1900-49de-655f132af489 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb, deleteOnExit=true 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/test.cache.data in system properties and HBase conf 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:29:54,118 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:29:54,118 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T11:29:54,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/dfs.datanode.shared.file.descriptor.paths in system 
properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:29:54,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:29:54,133 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:29:54,185 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:54,190 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:29:54,194 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:29:54,194 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:29:54,195 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:29:54,196 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:54,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b345207{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:29:54,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7af4b83b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:29:54,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@928ae22{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/java.io.tmpdir/jetty-localhost-39687-hadoop-hdfs-3_4_1-tests_jar-_-any-282863122610871533/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:29:54,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ab7d584{HTTP/1.1, (http/1.1)}{localhost:39687} 2024-11-13T11:29:54,310 INFO [Time-limited test {}] server.Server(415): Started @236471ms 2024-11-13T11:29:54,322 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:29:54,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:54,364 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:29:54,365 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:29:54,365 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:29:54,365 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:29:54,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bd0bba8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:29:54,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60993b69{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:29:54,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5df62b4a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/java.io.tmpdir/jetty-localhost-37729-hadoop-hdfs-3_4_1-tests_jar-_-any-737277995283967776/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:54,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f41f884{HTTP/1.1, (http/1.1)}{localhost:37729} 2024-11-13T11:29:54,458 INFO [Time-limited test {}] server.Server(415): Started @236619ms 2024-11-13T11:29:54,459 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:29:54,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:29:54,484 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:29:54,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:29:54,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:29:54,487 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:29:54,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a865b5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:29:54,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13da1053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:29:54,513 WARN [Thread-1946 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data1/current/BP-1554668404-172.17.0.2-1731497394136/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:54,513 WARN [Thread-1947 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data2/current/BP-1554668404-172.17.0.2-1731497394136/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:54,545 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf404a40f4236672e with lease ID 0x14f16b67a7eab9a9: Processing first storage report for DS-636d4d7a-b1e1-453d-9ee1-1bde46496604 from datanode DatanodeRegistration(127.0.0.1:36099, datanodeUuid=03d2a3d9-7807-4b68-8407-dd950d311662, infoPort=33035, infoSecurePort=0, ipcPort=43693, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136) 2024-11-13T11:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf404a40f4236672e with lease ID 0x14f16b67a7eab9a9: from storage DS-636d4d7a-b1e1-453d-9ee1-1bde46496604 node DatanodeRegistration(127.0.0.1:36099, datanodeUuid=03d2a3d9-7807-4b68-8407-dd950d311662, infoPort=33035, infoSecurePort=0, ipcPort=43693, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf404a40f4236672e with lease ID 0x14f16b67a7eab9a9: Processing first storage report for DS-327854ec-5ba2-4118-8a45-7ac1db32fa5d from datanode DatanodeRegistration(127.0.0.1:36099, datanodeUuid=03d2a3d9-7807-4b68-8407-dd950d311662, infoPort=33035, infoSecurePort=0, ipcPort=43693, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136) 2024-11-13T11:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf404a40f4236672e with lease ID 0x14f16b67a7eab9a9: from storage DS-327854ec-5ba2-4118-8a45-7ac1db32fa5d node DatanodeRegistration(127.0.0.1:36099, datanodeUuid=03d2a3d9-7807-4b68-8407-dd950d311662, infoPort=33035, infoSecurePort=0, ipcPort=43693, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:54,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fffa550{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/java.io.tmpdir/jetty-localhost-40079-hadoop-hdfs-3_4_1-tests_jar-_-any-4171276035923949293/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:29:54,598 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a183da4{HTTP/1.1, (http/1.1)}{localhost:40079} 2024-11-13T11:29:54,598 INFO [Time-limited test {}] server.Server(415): Started @236759ms 2024-11-13T11:29:54,599 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
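[Editor's illustrative sketch, not part of the captured log.] The minicluster options printed at the start of this test (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}) are what drive the DFS, Jetty and datanode startup recorded above. As a rough, hedged illustration only: the class names HBaseTestingUtil and StartMiniClusterOption come from the log itself, but the no-arg constructor, the builder method names (numMasters/numRegionServers/numDataNodes/numZkServers) and the startMiniCluster/shutdownMiniCluster entry points are assumptions carried over from the older HBaseTestingUtility API.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        // Class names appear in the log; the builder method names below are assumptions.
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as in the logged options
            .numRegionServers(1)  // one MiniHBaseClusterRegionServer
            .numDataNodes(2)      // two datanodes send the block reports seen above
            .numZkServers(1)      // single MiniZooKeeperCluster node
            .build();
        util.startMiniCluster(option);   // brings up DFS, ZooKeeper, master and regionserver
        try {
          // the test body (e.g. TestLogRolling#testLogRolling) would run here
        } finally {
          util.shutdownMiniCluster();    // tears the whole minicluster back down
        }
      }
    }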
2024-11-13T11:29:54,652 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:29:54,654 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data3/current/BP-1554668404-172.17.0.2-1731497394136/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:54,654 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data4/current/BP-1554668404-172.17.0.2-1731497394136/current, will proceed with Du for space computation calculation, 2024-11-13T11:29:54,673 WARN [Thread-1961 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:29:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f2a40ff445c26a2 with lease ID 0x14f16b67a7eab9aa: Processing first storage report for DS-a6571efd-6d7d-4f56-b09f-b905afd795ee from datanode DatanodeRegistration(127.0.0.1:35381, datanodeUuid=39141b50-6f68-4cdd-a4ff-e8286004802c, infoPort=46625, infoSecurePort=0, ipcPort=45737, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136) 2024-11-13T11:29:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f2a40ff445c26a2 with lease ID 0x14f16b67a7eab9aa: from storage DS-a6571efd-6d7d-4f56-b09f-b905afd795ee node DatanodeRegistration(127.0.0.1:35381, datanodeUuid=39141b50-6f68-4cdd-a4ff-e8286004802c, infoPort=46625, infoSecurePort=0, ipcPort=45737, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f2a40ff445c26a2 with lease ID 0x14f16b67a7eab9aa: Processing first storage report for DS-84e8a69a-7c7c-4ae3-8d42-e2da472d951c from datanode DatanodeRegistration(127.0.0.1:35381, datanodeUuid=39141b50-6f68-4cdd-a4ff-e8286004802c, infoPort=46625, infoSecurePort=0, ipcPort=45737, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136) 2024-11-13T11:29:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f2a40ff445c26a2 with lease ID 0x14f16b67a7eab9aa: from storage DS-84e8a69a-7c7c-4ae3-8d42-e2da472d951c node DatanodeRegistration(127.0.0.1:35381, datanodeUuid=39141b50-6f68-4cdd-a4ff-e8286004802c, infoPort=46625, infoSecurePort=0, ipcPort=45737, storageInfo=lv=-57;cid=testClusterID;nsid=436109398;c=1731497394136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:29:54,720 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489 2024-11-13T11:29:54,725 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/zookeeper_0, clientPort=62626, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:29:54,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62626 2024-11-13T11:29:54,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:29:54,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:29:54,740 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef with version=8 2024-11-13T11:29:54,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:29:54,743 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:29:54,743 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:29:54,744 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45517 2024-11-13T11:29:54,745 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45517 connecting to ZooKeeper ensemble=127.0.0.1:62626 2024-11-13T11:29:54,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455170x0, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:29:54,749 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45517-0x10038d8de920000 connected 2024-11-13T11:29:54,760 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,761 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,763 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:29:54,763 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef, hbase.cluster.distributed=false 2024-11-13T11:29:54,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:29:54,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45517 2024-11-13T11:29:54,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45517 2024-11-13T11:29:54,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45517 2024-11-13T11:29:54,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45517 2024-11-13T11:29:54,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45517 2024-11-13T11:29:54,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:54,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:54,782 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:29:54,782 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:54,782 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:54,783 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:29:54,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:29:54,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:29:54,783 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:29:54,783 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:29:54,783 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38217 2024-11-13T11:29:54,785 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38217 connecting to ZooKeeper ensemble=127.0.0.1:62626 2024-11-13T11:29:54,785 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382170x0, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:29:54,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38217-0x10038d8de920001 connected 2024-11-13T11:29:54,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:29:54,790 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-11-13T11:29:54,790 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:29:54,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:29:54,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:29:54,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38217 2024-11-13T11:29:54,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38217 2024-11-13T11:29:54,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38217 2024-11-13T11:29:54,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38217 2024-11-13T11:29:54,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38217 2024-11-13T11:29:54,804 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:45517 2024-11-13T11:29:54,804 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:54,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:54,806 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:29:54,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,807 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on existing znode=/hbase/master 
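[Editor's illustrative sketch, not part of the captured log.] The ZKUtil/ZKWatcher lines above repeatedly "set watcher on znode that does not yet exist" (/hbase/running, /hbase/master, /hbase/acl) and then react to NodeCreated and NodeChildrenChanged events. A minimal sketch of that watch-before-create pattern, written against the plain Apache ZooKeeper client rather than HBase's own ZKUtil (only the ensemble address 127.0.0.1:62626 is taken from the log):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());
        // Ensemble address taken from the log lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62626", 30000, watcher);
        // exists(path, true) registers the watch even when the znode is absent,
        // so a NodeCreated event fires once an active master writes /hbase/master.
        if (zk.exists("/hbase/master", true) == null) {
          System.out.println("Set watcher on znode that does not yet exist, /hbase/master");
        }
        Thread.sleep(5_000); // keep the toy client alive long enough for the event
        zk.close();
      }
    }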
2024-11-13T11:29:54,807 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,45517,1731497394742 from backup master directory 2024-11-13T11:29:54,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:54,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:29:54,808 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:29:54,808 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,811 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/hbase.id] with ID: aa7b4e2d-27a0-4192-ab35-449b89de0653 2024-11-13T11:29:54,811 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/.tmp/hbase.id 2024-11-13T11:29:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:29:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:29:54,817 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/.tmp/hbase.id]:[hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/hbase.id] 2024-11-13T11:29:54,828 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:54,828 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T11:29:54,829 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
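[Editor's illustrative sketch, not part of the captured log.] The cluster ID sequence just above (write hbase.id under .tmp, then move it to its final location) is the usual write-then-rename pattern on HDFS, so readers never observe a half-written file. A hedged sketch using the generic Hadoop FileSystem API; this is not HBase's actual FSUtils helper, and the helper name and the reuse of the logged cluster ID are purely for illustration:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      // Write content to <parent>/.tmp/<name>, then rename into place.
      public static void writeAtomically(FileSystem fs, Path target, String content)
          throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        // rename within one HDFS filesystem is atomic, so the target appears fully written
        if (!fs.rename(tmp, target)) {
          throw new IOException("Move of " + tmp + " to " + target + " failed");
        }
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // uses fs.defaultFS
        writeAtomically(fs, new Path("/user/jenkins/test-data/demo/hbase.id"),
            "aa7b4e2d-27a0-4192-ab35-449b89de0653"); // ID value copied from the log
      }
    }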
2024-11-13T11:29:54,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:29:54,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:29:54,836 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:29:54,837 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:29:54,838 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:29:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:29:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:29:54,847 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store 2024-11-13T11:29:54,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:29:54,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:29:54,855 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:54,855 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:29:54,855 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:54,855 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:54,855 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:29:54,855 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:29:54,855 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
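[Editor's illustrative sketch, not part of the captured log.] The master:store descriptor dumped above lists each column family's attributes in key => value form. Purely to show what those attributes correspond to in the public client builder API (this is not how MasterRegion actually constructs its descriptor), the 'info' family could be expressed roughly as follows, with the values copied from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .build();
      }
    }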
2024-11-13T11:29:54,855 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497394855Disabling compacts and flushes for region at 1731497394855Disabling writes for close at 1731497394855Writing region close event to WAL at 1731497394855Closed at 1731497394855 2024-11-13T11:29:54,856 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/.initializing 2024-11-13T11:29:54,856 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/WALs/7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,859 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C45517%2C1731497394742, suffix=, logDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/WALs/7bf281cf3991,45517,1731497394742, archiveDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/oldWALs, maxLogs=10 2024-11-13T11:29:54,860 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C45517%2C1731497394742.1731497394860 2024-11-13T11:29:54,864 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/WALs/7bf281cf3991,45517,1731497394742/7bf281cf3991%2C45517%2C1731497394742.1731497394860 2024-11-13T11:29:54,866 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33035:33035),(127.0.0.1/127.0.0.1:46625:46625)] 2024-11-13T11:29:54,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:29:54,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:54,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,869 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:29:54,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:54,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:29:54,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:54,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:29:54,875 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:54,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:29:54,876 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:54,877 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,877 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,878 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,879 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,879 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,879 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:29:54,880 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:29:54,882 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:29:54,882 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746593, jitterRate=-0.05065849423408508}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:29:54,883 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497394869Initializing all the Stores at 1731497394870 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497394870Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497394871 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497394871Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497394871Cleaning up temporary data from old regions at 1731497394879 (+8 ms)Region opened successfully at 1731497394883 (+4 ms) 2024-11-13T11:29:54,883 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:29:54,885 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49c4e3cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:29:54,886 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:29:54,886 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:29:54,886 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:29:54,886 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:29:54,887 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T11:29:54,887 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T11:29:54,887 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:29:54,889 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:29:54,889 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:29:54,890 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:29:54,890 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:29:54,891 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:29:54,892 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:29:54,892 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:29:54,893 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:29:54,893 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:29:54,894 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:29:54,895 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:29:54,896 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:29:54,897 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:29:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:29:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:29:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,898 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,45517,1731497394742, sessionid=0x10038d8de920000, setting cluster-up flag (Was=false) 2024-11-13T11:29:54,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,902 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:29:54,903 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:54,908 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:29:54,909 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,45517,1731497394742 2024-11-13T11:29:54,910 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:29:54,911 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:54,911 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:29:54,911 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:29:54,912 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,45517,1731497394742 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:29:54,913 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497424915 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:29:54,915 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:54,915 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:54,915 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:29:54,916 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,916 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497394916,5,FailOnTimeoutGroup] 2024-11-13T11:29:54,916 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:29:54,916 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497394916,5,FailOnTimeoutGroup] 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:29:54,916 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:54,917 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:29:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:29:54,924 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:29:54,924 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef 2024-11-13T11:29:54,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:29:54,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:29:54,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:54,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:29:54,933 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:29:54,933 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:54,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:29:54,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:29:54,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:54,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:29:54,936 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:29:54,936 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:54,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:29:54,937 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:29:54,937 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:54,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:54,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:29:54,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740 2024-11-13T11:29:54,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740 2024-11-13T11:29:54,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:29:54,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:29:54,941 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:29:54,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:29:54,944 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:29:54,945 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693377, jitterRate=-0.11832612752914429}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:29:54,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497394931Initializing all the Stores at 1731497394932 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497394932Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497394932Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497394932Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497394932Cleaning up temporary data from old regions at 1731497394940 (+8 ms)Region opened successfully at 1731497394945 (+5 ms) 2024-11-13T11:29:54,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:29:54,945 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:29:54,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:29:54,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:29:54,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:29:54,946 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:29:54,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497394945Disabling compacts and flushes for region at 
1731497394945Disabling writes for close at 1731497394945Writing region close event to WAL at 1731497394946 (+1 ms)Closed at 1731497394946 2024-11-13T11:29:54,947 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:54,947 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:29:54,947 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:29:54,949 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:29:54,950 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:29:54,995 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(746): ClusterId : aa7b4e2d-27a0-4192-ab35-449b89de0653 2024-11-13T11:29:54,995 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:29:54,996 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:29:54,996 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T11:29:54,998 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:29:54,998 DEBUG [RS:0;7bf281cf3991:38217 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e78f8b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:29:55,011 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:38217 2024-11-13T11:29:55,011 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:29:55,011 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:29:55,011 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T11:29:55,012 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,45517,1731497394742 with port=38217, startcode=1731497394782 2024-11-13T11:29:55,012 DEBUG [RS:0;7bf281cf3991:38217 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:29:55,014 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58675, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:29:55,015 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,015 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,016 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef 2024-11-13T11:29:55,016 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45435 2024-11-13T11:29:55,016 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T11:29:55,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:29:55,018 DEBUG [RS:0;7bf281cf3991:38217 {}] zookeeper.ZKUtil(111): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,018 WARN [RS:0;7bf281cf3991:38217 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:29:55,018 INFO [RS:0;7bf281cf3991:38217 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:29:55,018 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,018 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,38217,1731497394782] 2024-11-13T11:29:55,021 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:29:55,022 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:29:55,023 INFO [RS:0;7bf281cf3991:38217 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:29:55,023 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:55,023 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:29:55,024 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:29:55,024 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:29:55,024 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:29:55,025 DEBUG [RS:0;7bf281cf3991:38217 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:29:55,026 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:55,026 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,026 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,026 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,026 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,026 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,38217,1731497394782-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:29:55,039 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:29:55,040 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,38217,1731497394782-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,040 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,040 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.Replication(171): 7bf281cf3991,38217,1731497394782 started 2024-11-13T11:29:55,053 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,053 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,38217,1731497394782, RpcServer on 7bf281cf3991/172.17.0.2:38217, sessionid=0x10038d8de920001 2024-11-13T11:29:55,053 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:29:55,053 DEBUG [RS:0;7bf281cf3991:38217 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,053 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,38217,1731497394782' 2024-11-13T11:29:55,053 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:29:55,054 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:29:55,054 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:29:55,054 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:29:55,054 DEBUG [RS:0;7bf281cf3991:38217 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,054 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,38217,1731497394782' 2024-11-13T11:29:55,054 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:29:55,055 DEBUG 
[RS:0;7bf281cf3991:38217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:29:55,055 DEBUG [RS:0;7bf281cf3991:38217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:29:55,055 INFO [RS:0;7bf281cf3991:38217 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:29:55,055 INFO [RS:0;7bf281cf3991:38217 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T11:29:55,100 WARN [7bf281cf3991:45517 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T11:29:55,159 INFO [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C38217%2C1731497394782, suffix=, logDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782, archiveDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/oldWALs, maxLogs=32 2024-11-13T11:29:55,161 INFO [RS:0;7bf281cf3991:38217 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C38217%2C1731497394782.1731497395160 2024-11-13T11:29:55,171 INFO [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497395160 2024-11-13T11:29:55,172 DEBUG [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33035:33035),(127.0.0.1/127.0.0.1:46625:46625)] 2024-11-13T11:29:55,350 DEBUG [7bf281cf3991:45517 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:29:55,351 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,354 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,38217,1731497394782, state=OPENING 2024-11-13T11:29:55,356 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:29:55,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:55,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:29:55,359 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:29:55,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,38217,1731497394782}] 2024-11-13T11:29:55,359 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:55,359 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:55,514 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:29:55,519 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34385, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:29:55,525 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:29:55,525 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:29:55,527 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C38217%2C1731497394782.meta, suffix=.meta, logDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782, archiveDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/oldWALs, maxLogs=32 2024-11-13T11:29:55,528 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C38217%2C1731497394782.meta.1731497395527.meta 2024-11-13T11:29:55,533 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.meta.1731497395527.meta 2024-11-13T11:29:55,534 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33035:33035),(127.0.0.1/127.0.0.1:46625:46625)] 2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:29:55,535 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:29:55,535 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:29:55,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:29:55,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:29:55,538 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:55,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:55,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:29:55,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:29:55,539 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:55,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:55,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:29:55,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:29:55,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:55,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:29:55,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:29:55,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:29:55,541 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:55,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T11:29:55,542 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:29:55,542 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740 2024-11-13T11:29:55,544 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740 2024-11-13T11:29:55,545 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:29:55,545 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:29:55,545 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:29:55,547 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:29:55,547 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881412, jitterRate=0.12077401578426361}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:29:55,548 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:29:55,548 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497395535Writing region info on filesystem at 1731497395535Initializing all the Stores at 1731497395536 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497395536Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497395537 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497395537Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497395537Cleaning up temporary data from old regions at 1731497395545 (+8 ms)Running coprocessor post-open hooks at 1731497395548 (+3 ms)Region opened successfully at 1731497395548 2024-11-13T11:29:55,549 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497395514 2024-11-13T11:29:55,551 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:29:55,551 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:29:55,552 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,554 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,38217,1731497394782, state=OPEN 2024-11-13T11:29:55,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:29:55,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:29:55,556 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:55,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:29:55,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:29:55,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,38217,1731497394782 in 197 msec 2024-11-13T11:29:55,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:29:55,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-13T11:29:55,562 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:29:55,562 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:29:55,564 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:29:55,564 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,38217,1731497394782, seqNum=-1] 2024-11-13T11:29:55,565 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:29:55,566 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44369, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:29:55,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 660 msec 2024-11-13T11:29:55,572 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497395572, completionTime=-1 2024-11-13T11:29:55,572 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:29:55,572 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497455574 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497515574 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,45517,1731497394742-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,45517,1731497394742-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,45517,1731497394742-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:45517, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,574 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:29:55,576 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.769sec 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:29:55,577 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,45517,1731497394742-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:29:55,578 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,45517,1731497394742-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:29:55,580 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:29:55,580 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:29:55,580 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,45517,1731497394742-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T11:29:55,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e79bdc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:29:55,595 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,45517,-1 for getting cluster id 2024-11-13T11:29:55,595 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:29:55,597 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'aa7b4e2d-27a0-4192-ab35-449b89de0653' 2024-11-13T11:29:55,597 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:29:55,597 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "aa7b4e2d-27a0-4192-ab35-449b89de0653" 2024-11-13T11:29:55,597 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce3d50f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:29:55,597 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,45517,-1] 2024-11-13T11:29:55,598 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:29:55,598 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:29:55,599 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47048, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:29:55,600 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75115877, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:29:55,600 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:29:55,602 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,38217,1731497394782, seqNum=-1] 2024-11-13T11:29:55,602 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:29:55,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44852, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:29:55,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7bf281cf3991,45517,1731497394742 2024-11-13T11:29:55,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:29:55,610 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:29:55,610 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T11:29:55,611 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 7bf281cf3991,45517,1731497394742 2024-11-13T11:29:55,612 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@554e3efd 2024-11-13T11:29:55,612 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T11:29:55,613 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47052, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T11:29:55,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T11:29:55,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T11:29:55,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:29:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-13T11:29:55,617 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T11:29:55,617 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:55,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-13T11:29:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:29:55,618 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T11:29:55,626 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741835_1011 (size=381) 2024-11-13T11:29:55,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741835_1011 (size=381) 2024-11-13T11:29:55,629 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fc299128ef471b6596e90fdba7a5058f, NAME => 'TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef 2024-11-13T11:29:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741836_1012 (size=64) 2024-11-13T11:29:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741836_1012 (size=64) 2024-11-13T11:29:55,637 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:55,637 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing fc299128ef471b6596e90fdba7a5058f, disabling compactions & flushes 2024-11-13T11:29:55,637 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:29:55,637 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:29:55,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. after waiting 1 ms 2024-11-13T11:29:55,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:29:55,638 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 
2024-11-13T11:29:55,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for fc299128ef471b6596e90fdba7a5058f: Waiting for close lock at 1731497395637Disabling compacts and flushes for region at 1731497395637Disabling writes for close at 1731497395638 (+1 ms)Writing region close event to WAL at 1731497395638Closed at 1731497395638 2024-11-13T11:29:55,639 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T11:29:55,639 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731497395639"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497395639"}]},"ts":"1731497395639"} 2024-11-13T11:29:55,641 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T11:29:55,642 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T11:29:55,642 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497395642"}]},"ts":"1731497395642"} 2024-11-13T11:29:55,644 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-13T11:29:55,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, ASSIGN}] 2024-11-13T11:29:55,645 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, ASSIGN 2024-11-13T11:29:55,646 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, ASSIGN; state=OFFLINE, location=7bf281cf3991,38217,1731497394782; forceNewPlan=false, retain=false 2024-11-13T11:29:55,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:55,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:55,797 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fc299128ef471b6596e90fdba7a5058f, regionState=OPENING, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, ASSIGN because future has completed 2024-11-13T11:29:55,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782}] 2024-11-13T11:29:55,965 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:29:55,965 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => fc299128ef471b6596e90fdba7a5058f, NAME => 'TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:29:55,966 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,966 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:29:55,966 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,966 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,968 INFO [StoreOpener-fc299128ef471b6596e90fdba7a5058f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,970 INFO [StoreOpener-fc299128ef471b6596e90fdba7a5058f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fc299128ef471b6596e90fdba7a5058f columnFamilyName info 2024-11-13T11:29:55,970 DEBUG [StoreOpener-fc299128ef471b6596e90fdba7a5058f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:29:55,970 INFO [StoreOpener-fc299128ef471b6596e90fdba7a5058f-1 {}] regionserver.HStore(327): Store=fc299128ef471b6596e90fdba7a5058f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:29:55,971 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,972 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,972 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,973 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,973 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,975 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,976 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:29:55,977 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened fc299128ef471b6596e90fdba7a5058f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725883, jitterRate=-0.0769927054643631}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:29:55,977 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:29:55,978 DEBUG 
[RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for fc299128ef471b6596e90fdba7a5058f: Running coprocessor pre-open hook at 1731497395966Writing region info on filesystem at 1731497395966Initializing all the Stores at 1731497395967 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497395968 (+1 ms)Cleaning up temporary data from old regions at 1731497395973 (+5 ms)Running coprocessor post-open hooks at 1731497395977 (+4 ms)Region opened successfully at 1731497395977 2024-11-13T11:29:55,979 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., pid=6, masterSystemTime=1731497395958 2024-11-13T11:29:55,981 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:29:55,981 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:29:55,981 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fc299128ef471b6596e90fdba7a5058f, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:29:55,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782 because future has completed 2024-11-13T11:29:55,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T11:29:55,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782 in 181 msec 2024-11-13T11:29:55,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T11:29:55,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, ASSIGN in 343 msec 2024-11-13T11:29:55,991 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T11:29:55,991 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731497395991"}]},"ts":"1731497395991"} 2024-11-13T11:29:55,993 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated 
tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-13T11:29:55,994 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T11:29:55,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 380 msec 2024-11-13T11:29:56,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:56,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:57,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:57,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:58,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:58,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:58,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:29:59,174 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:29:59,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:29:59,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:29:59,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:00,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:00,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:01,021 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T11:30:01,022 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-13T11:30:01,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:01,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:02,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:02,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:03,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:03,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:03,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:30:03,965 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T11:30:03,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:30:03,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T11:30:03,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T11:30:03,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T11:30:03,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-13T11:30:03,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T11:30:04,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:04,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T11:30:05,681 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-13T11:30:05,681 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-13T11:30:05,683 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-13T11:30:05,683 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:30:05,686 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2] 2024-11-13T11:30:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:05,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:30:05,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/9ee7f1b14e054166a2a08063fe27f103 is 1080, key is row0001/info:/1731497405686/Put/seqid=0 2024-11-13T11:30:05,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741837_1013 (size=12509) 2024-11-13T11:30:05,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741837_1013 (size=12509) 2024-11-13T11:30:05,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/9ee7f1b14e054166a2a08063fe27f103 2024-11-13T11:30:05,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/9ee7f1b14e054166a2a08063fe27f103 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9ee7f1b14e054166a2a08063fe27f103 2024-11-13T11:30:05,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9ee7f1b14e054166a2a08063fe27f103, entries=7, sequenceid=11, filesize=12.2 K 
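[Editor's note on the memstore flush records around this point and the RegionTooBusyException a few records further down: the arithmetic in these lines is plain size bookkeeping. Roughly 1 KB cells accumulate in the region's memstore, a flush writes them out to an HFile (7.36 KB at sequenceid=11, then 24.17 KB at sequenceid=37), and once the buffered size passes the blocking limit configured for this run (reported as "Over memstore limit=32.0 K") new mutations are refused until a flush catches up. A minimal standalone sketch of that bookkeeping follows; the FLUSH_SIZE and BLOCKING_SIZE values are assumed test-sized thresholds inferred from the log, and all class, method, and field names are hypothetical, not HBase source.]

// Illustrative sketch only, not HBase code: models the size bookkeeping visible in
// the surrounding log records. The thresholds are assumptions for this test-sized run.
public class MemstoreBookkeepingSketch {
    static final long FLUSH_SIZE = 8 * 1024;      // request a flush once ~8 KB is buffered
    static final long BLOCKING_SIZE = 32 * 1024;  // refuse writes above ~32 KB ("region too busy")

    private long memstoreBytes = 0;

    // Accepts one cell of the given size, mirroring the ~1 KB row0001.../info: puts in the log.
    void put(long cellBytes) {
        if (memstoreBytes + cellBytes > BLOCKING_SIZE) {
            // Corresponds to the RegionTooBusyException path: the caller backs off and retries.
            throw new IllegalStateException("over memstore limit=" + BLOCKING_SIZE + " bytes");
        }
        memstoreBytes += cellBytes;
        if (memstoreBytes >= FLUSH_SIZE) {
            flush();
        }
    }

    // The real flusher writes an HFile and resets the memstore; only the sizes are modeled here.
    void flush() {
        System.out.printf("flushing %.2f KB%n", memstoreBytes / 1024.0);
        memstoreBytes = 0;
    }

    public static void main(String[] args) {
        MemstoreBookkeepingSketch region = new MemstoreBookkeepingSketch();
        for (int i = 0; i < 30; i++) {
            region.put(1080); // roughly the 1080-byte cells reported by HFileWriterImpl above
        }
    }
}

[In the actual test the flush runs on a separate MemStoreFlusher thread (visible in the thread names below), so incoming puts can outrun it and hit the blocking limit; that lag is what produces the RegionTooBusyException recorded at 11:30:07 further on.]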
2024-11-13T11:30:05,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for fc299128ef471b6596e90fdba7a5058f in 42ms, sequenceid=11, compaction requested=false 2024-11-13T11:30:05,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fc299128ef471b6596e90fdba7a5058f: 2024-11-13T11:30:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:05,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-13T11:30:05,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/d811b94f3b494a47a257a2b08e5df2a5 is 1080, key is row0008/info:/1731497405699/Put/seqid=0 2024-11-13T11:30:05,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741838_1014 (size=29761) 2024-11-13T11:30:05,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741838_1014 (size=29761) 2024-11-13T11:30:05,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/d811b94f3b494a47a257a2b08e5df2a5 2024-11-13T11:30:05,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/d811b94f3b494a47a257a2b08e5df2a5 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5 2024-11-13T11:30:05,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5, entries=23, sequenceid=37, filesize=29.1 K 2024-11-13T11:30:05,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for fc299128ef471b6596e90fdba7a5058f in 24ms, sequenceid=37, compaction requested=false 2024-11-13T11:30:05,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fc299128ef471b6596e90fdba7a5058f: 2024-11-13T11:30:05,765 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-13T11:30:05,765 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:30:05,766 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5 because midkey is the same as first or last row 2024-11-13T11:30:05,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:05,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:06,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:06,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T11:30:07,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f
2024-11-13T11:30:07,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-13T11:30:07,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/f7fa8ebdc0a94ce7a2bddaf17081e14f is 1080, key is row0031/info:/1731497405742/Put/seqid=0
2024-11-13T11:30:07,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741839_1015 (size=12509)
2024-11-13T11:30:07,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741839_1015 (size=12509)
2024-11-13T11:30:07,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:30:07,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:30:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-13T11:30:07,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44852 deadline: 1731497417801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782
2024-11-13T11:30:07,805 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-13T11:30:07,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-13T11:30:07,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2 because the exception is null or not the one we care about
2024-11-13T11:30:08,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/f7fa8ebdc0a94ce7a2bddaf17081e14f
2024-11-13T11:30:08,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/f7fa8ebdc0a94ce7a2bddaf17081e14f as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f7fa8ebdc0a94ce7a2bddaf17081e14f
2024-11-13T11:30:08,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f7fa8ebdc0a94ce7a2bddaf17081e14f, entries=7, sequenceid=47, filesize=12.2 K
2024-11-13T11:30:08,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for fc299128ef471b6596e90fdba7a5058f in 432ms, sequenceid=47, compaction requested=true
2024-11-13T11:30:08,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fc299128ef471b6596e90fdba7a5058f:
2024-11-13T11:30:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K
2024-11-13T11:30:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5 because midkey is the same as first or last row
2024-11-13T11:30:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fc299128ef471b6596e90fdba7a5058f:info, priority=-2147483648, current under compaction store size is 1
2024-11-13T11:30:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-13T11:30:08,195 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-13T11:30:08,196 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-13T11:30:08,196 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): fc299128ef471b6596e90fdba7a5058f/info is initiating minor compaction (all files)
2024-11-13T11:30:08,197 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fc299128ef471b6596e90fdba7a5058f/info in TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.
2024-11-13T11:30:08,197 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9ee7f1b14e054166a2a08063fe27f103, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f7fa8ebdc0a94ce7a2bddaf17081e14f] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp, totalSize=53.5 K
2024-11-13T11:30:08,197 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ee7f1b14e054166a2a08063fe27f103, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731497405686
2024-11-13T11:30:08,197 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting d811b94f3b494a47a257a2b08e5df2a5, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731497405699
2024-11-13T11:30:08,198 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting f7fa8ebdc0a94ce7a2bddaf17081e14f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731497405742
2024-11-13T11:30:08,208 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fc299128ef471b6596e90fdba7a5058f#info#compaction#57 average throughput is 37.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-13T11:30:08,209 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/9be434756a6d4867be511b223b159c3a is 1080, key is row0001/info:/1731497405686/Put/seqid=0
2024-11-13T11:30:08,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741840_1016 (size=44978)
2024-11-13T11:30:08,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741840_1016 (size=44978)
2024-11-13T11:30:08,219 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/9be434756a6d4867be511b223b159c3a as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a
2024-11-13T11:30:08,225 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fc299128ef471b6596e90fdba7a5058f/info of fc299128ef471b6596e90fdba7a5058f into 9be434756a6d4867be511b223b159c3a(size=43.9 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fc299128ef471b6596e90fdba7a5058f:
2024-11-13T11:30:08,225 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., storeName=fc299128ef471b6596e90fdba7a5058f/info, priority=13, startTime=1731497408195; duration=0sec
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a because midkey is the same as first or last row
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a because midkey is the same as first or last row
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a because midkey is the same as first or last row
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-13T11:30:08,225 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fc299128ef471b6596e90fdba7a5058f:info
2024-11-13T11:30:08,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:08,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:09,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:09,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:10,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:10,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:11,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:11,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:12,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:12,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:13,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:13,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:14,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:14,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:15,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:15,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:16,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:16,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:17,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:17,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T11:30:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f
2024-11-13T11:30:17,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-13T11:30:17,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/00f6116bae1c485aac7b88a707044b3e is 1080, key is row0038/info:/1731497407763/Put/seqid=0
2024-11-13T11:30:17,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741841_1017 (size=29761)
2024-11-13T11:30:17,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741841_1017 (size=29761)
2024-11-13T11:30:17,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/00f6116bae1c485aac7b88a707044b3e
2024-11-13T11:30:17,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/00f6116bae1c485aac7b88a707044b3e as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/00f6116bae1c485aac7b88a707044b3e
2024-11-13T11:30:17,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/00f6116bae1c485aac7b88a707044b3e, entries=23, sequenceid=74, filesize=29.1 K
2024-11-13T11:30:17,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for fc299128ef471b6596e90fdba7a5058f in 27ms, sequenceid=74, compaction requested=false
2024-11-13T11:30:17,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fc299128ef471b6596e90fdba7a5058f:
2024-11-13T11:30:17,912 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.0 K, sizeToCheck=16.0 K
2024-11-13T11:30:17,912 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:17,912 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a because midkey is the same as first or last row
2024-11-13T11:30:18,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:18,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:19,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:19,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:19,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:19,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:30:19,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/feac30eb12d44e2c9fa5ac3049ed4a94 is 1080, key is row0061/info:/1731497417889/Put/seqid=0 2024-11-13T11:30:19,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741842_1018 (size=12509) 2024-11-13T11:30:19,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741842_1018 (size=12509) 2024-11-13T11:30:19,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/feac30eb12d44e2c9fa5ac3049ed4a94 2024-11-13T11:30:19,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/feac30eb12d44e2c9fa5ac3049ed4a94 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/feac30eb12d44e2c9fa5ac3049ed4a94 2024-11-13T11:30:19,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/feac30eb12d44e2c9fa5ac3049ed4a94, entries=7, sequenceid=84, filesize=12.2 K 2024-11-13T11:30:19,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for fc299128ef471b6596e90fdba7a5058f in 22ms, sequenceid=84, compaction requested=true 2024-11-13T11:30:19,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fc299128ef471b6596e90fdba7a5058f: 2024-11-13T11:30:19,935 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=85.2 K, sizeToCheck=16.0 K 2024-11-13T11:30:19,935 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:30:19,936 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a because midkey is the same as first or last row 2024-11-13T11:30:19,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fc299128ef471b6596e90fdba7a5058f:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-13T11:30:19,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:19,936 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:30:19,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:19,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T11:30:19,937 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87248 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:30:19,937 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): fc299128ef471b6596e90fdba7a5058f/info is initiating minor compaction (all files) 2024-11-13T11:30:19,937 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fc299128ef471b6596e90fdba7a5058f/info in TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:30:19,937 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/00f6116bae1c485aac7b88a707044b3e, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/feac30eb12d44e2c9fa5ac3049ed4a94] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp, totalSize=85.2 K 2024-11-13T11:30:19,938 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9be434756a6d4867be511b223b159c3a, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731497405686 2024-11-13T11:30:19,938 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00f6116bae1c485aac7b88a707044b3e, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731497407763 2024-11-13T11:30:19,938 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting feac30eb12d44e2c9fa5ac3049ed4a94, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1731497417889 2024-11-13T11:30:19,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/805459ed1d5d492e8c1f6e5de6bab2fc is 1080, key is row0068/info:/1731497419914/Put/seqid=0 
2024-11-13T11:30:19,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741843_1019 (size=17894) 2024-11-13T11:30:19,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741843_1019 (size=17894) 2024-11-13T11:30:19,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/805459ed1d5d492e8c1f6e5de6bab2fc 2024-11-13T11:30:19,955 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fc299128ef471b6596e90fdba7a5058f#info#compaction#61 average throughput is 34.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:19,956 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/4a7671e9b98a410894b741122d58b322 is 1080, key is row0001/info:/1731497405686/Put/seqid=0 2024-11-13T11:30:19,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741844_1020 (size=77532) 2024-11-13T11:30:19,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741844_1020 (size=77532) 2024-11-13T11:30:19,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/805459ed1d5d492e8c1f6e5de6bab2fc as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/805459ed1d5d492e8c1f6e5de6bab2fc 2024-11-13T11:30:19,966 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/4a7671e9b98a410894b741122d58b322 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322 2024-11-13T11:30:19,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/805459ed1d5d492e8c1f6e5de6bab2fc, entries=12, sequenceid=99, filesize=17.5 K 2024-11-13T11:30:19,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for fc299128ef471b6596e90fdba7a5058f in 30ms, sequenceid=99, compaction requested=false 2024-11-13T11:30:19,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
fc299128ef471b6596e90fdba7a5058f:
2024-11-13T11:30:19,968 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K
2024-11-13T11:30:19,968 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:19,968 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a because midkey is the same as first or last row
2024-11-13T11:30:19,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on fc299128ef471b6596e90fdba7a5058f
2024-11-13T11:30:19,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-13T11:30:19,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/f5a5d31a6fb84723ac85f5d6a665c9f7 is 1080, key is row0080/info:/1731497419938/Put/seqid=0
2024-11-13T11:30:19,975 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fc299128ef471b6596e90fdba7a5058f/info of fc299128ef471b6596e90fdba7a5058f into 4a7671e9b98a410894b741122d58b322(size=75.7 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fc299128ef471b6596e90fdba7a5058f:
2024-11-13T11:30:19,975 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., storeName=fc299128ef471b6596e90fdba7a5058f/info, priority=13, startTime=1731497419936; duration=0sec
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K
2024-11-13T11:30:19,975 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-13T11:30:19,977 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-13T11:30:19,977 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-13T11:30:19,977 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fc299128ef471b6596e90fdba7a5058f:info
2024-11-13T11:30:19,978 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] assignment.AssignmentManager(1355): Split request from 7bf281cf3991,38217,1731497394782, parent={ENCODED => fc299128ef471b6596e90fdba7a5058f, NAME => 'TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-11-13T11:30:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741845_1021 (size=16817)
2024-11-13T11:30:19,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741845_1021 (size=16817)
2024-11-13T11:30:19,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/f5a5d31a6fb84723ac85f5d6a665c9f7
2024-11-13T11:30:19,985 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] assignment.SplitTableRegionProcedure(223): Splittable=true
state=OPEN, location=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:19,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/f5a5d31a6fb84723ac85f5d6a665c9f7 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f5a5d31a6fb84723ac85f5d6a665c9f7 2024-11-13T11:30:19,989 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fc299128ef471b6596e90fdba7a5058f, daughterA=343252d4dd2ed97f4f7c754befd7d1d4, daughterB=5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:19,990 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fc299128ef471b6596e90fdba7a5058f, daughterA=343252d4dd2ed97f4f7c754befd7d1d4, daughterB=5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:19,991 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fc299128ef471b6596e90fdba7a5058f, daughterA=343252d4dd2ed97f4f7c754befd7d1d4, daughterB=5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:19,991 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fc299128ef471b6596e90fdba7a5058f, daughterA=343252d4dd2ed97f4f7c754befd7d1d4, daughterB=5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:19,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f5a5d31a6fb84723ac85f5d6a665c9f7, entries=11, sequenceid=113, filesize=16.4 K 2024-11-13T11:30:19,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for fc299128ef471b6596e90fdba7a5058f in 26ms, sequenceid=113, compaction requested=true 2024-11-13T11:30:19,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fc299128ef471b6596e90fdba7a5058f: 2024-11-13T11:30:19,996 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-13T11:30:19,996 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:30:19,996 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-13T11:30:19,996 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:30:19,996 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is 
big enough sumSize=109.6 K, sizeToCheck=16.0 K 2024-11-13T11:30:19,997 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T11:30:19,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:19,997 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] assignment.AssignmentManager(1355): Split request from 7bf281cf3991,38217,1731497394782, parent={ENCODED => fc299128ef471b6596e90fdba7a5058f, NAME => 'TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-13T11:30:19,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45517 {}] assignment.AssignmentManager(1370): Ignoring split request from 7bf281cf3991,38217,1731497394782, parent={ENCODED => fc299128ef471b6596e90fdba7a5058f, NAME => 'TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-13T11:30:19,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, UNASSIGN}] 2024-11-13T11:30:20,001 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, UNASSIGN 2024-11-13T11:30:20,002 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=fc299128ef471b6596e90fdba7a5058f, regionState=CLOSING, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:20,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, UNASSIGN because future has completed 2024-11-13T11:30:20,005 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T11:30:20,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782}] 2024-11-13T11:30:20,163 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,163 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T11:30:20,164 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing fc299128ef471b6596e90fdba7a5058f, disabling compactions & flushes 2024-11-13T11:30:20,164 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): 
Closing region TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:30:20,164 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:30:20,164 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. after waiting 0 ms 2024-11-13T11:30:20,164 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:30:20,164 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing fc299128ef471b6596e90fdba7a5058f 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-13T11:30:20,169 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/eced3d5057fa4eaaba43f9a67d348594 is 1080, key is row0091/info:/1731497419971/Put/seqid=0 2024-11-13T11:30:20,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741846_1022 (size=11424) 2024-11-13T11:30:20,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741846_1022 (size=11424) 2024-11-13T11:30:20,175 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/eced3d5057fa4eaaba43f9a67d348594 2024-11-13T11:30:20,180 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/.tmp/info/eced3d5057fa4eaaba43f9a67d348594 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/eced3d5057fa4eaaba43f9a67d348594 2024-11-13T11:30:20,186 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/eced3d5057fa4eaaba43f9a67d348594, entries=6, sequenceid=123, filesize=11.2 K 2024-11-13T11:30:20,187 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 
fc299128ef471b6596e90fdba7a5058f in 23ms, sequenceid=123, compaction requested=true 2024-11-13T11:30:20,189 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9ee7f1b14e054166a2a08063fe27f103, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f7fa8ebdc0a94ce7a2bddaf17081e14f, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/00f6116bae1c485aac7b88a707044b3e, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/feac30eb12d44e2c9fa5ac3049ed4a94] to archive 2024-11-13T11:30:20,190 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T11:30:20,192 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9ee7f1b14e054166a2a08063fe27f103 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9ee7f1b14e054166a2a08063fe27f103 2024-11-13T11:30:20,193 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/d811b94f3b494a47a257a2b08e5df2a5 2024-11-13T11:30:20,195 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/9be434756a6d4867be511b223b159c3a 2024-11-13T11:30:20,196 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f7fa8ebdc0a94ce7a2bddaf17081e14f to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f7fa8ebdc0a94ce7a2bddaf17081e14f 2024-11-13T11:30:20,198 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/00f6116bae1c485aac7b88a707044b3e to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/00f6116bae1c485aac7b88a707044b3e 2024-11-13T11:30:20,199 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/feac30eb12d44e2c9fa5ac3049ed4a94 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/feac30eb12d44e2c9fa5ac3049ed4a94 2024-11-13T11:30:20,207 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-13T11:30:20,208 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 2024-11-13T11:30:20,208 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for fc299128ef471b6596e90fdba7a5058f: Waiting for close lock at 1731497420164Running coprocessor pre-close hooks at 1731497420164Disabling compacts and flushes for region at 1731497420164Disabling writes for close at 1731497420164Obtaining lock to block concurrent updates at 1731497420164Preparing flush snapshotting stores in fc299128ef471b6596e90fdba7a5058f at 1731497420164Finished memstore snapshotting TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., syncing WAL and waiting on mvcc, flushsize=dataSize=6456, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1731497420164Flushing stores of TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 
at 1731497420165 (+1 ms)Flushing fc299128ef471b6596e90fdba7a5058f/info: creating writer at 1731497420165Flushing fc299128ef471b6596e90fdba7a5058f/info: appending metadata at 1731497420168 (+3 ms)Flushing fc299128ef471b6596e90fdba7a5058f/info: closing flushed file at 1731497420169 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f32fdc3: reopening flushed file at 1731497420179 (+10 ms)Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for fc299128ef471b6596e90fdba7a5058f in 23ms, sequenceid=123, compaction requested=true at 1731497420187 (+8 ms)Writing region close event to WAL at 1731497420203 (+16 ms)Running coprocessor post-close hooks at 1731497420208 (+5 ms)Closed at 1731497420208 2024-11-13T11:30:20,211 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,211 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=fc299128ef471b6596e90fdba7a5058f, regionState=CLOSED 2024-11-13T11:30:20,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782 because future has completed 2024-11-13T11:30:20,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-13T11:30:20,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure fc299128ef471b6596e90fdba7a5058f, server=7bf281cf3991,38217,1731497394782 in 210 msec 2024-11-13T11:30:20,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T11:30:20,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fc299128ef471b6596e90fdba7a5058f, UNASSIGN in 220 msec 2024-11-13T11:30:20,230 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:30:20,234 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=fc299128ef471b6596e90fdba7a5058f, threads=4 2024-11-13T11:30:20,236 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/805459ed1d5d492e8c1f6e5de6bab2fc for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,236 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f5a5d31a6fb84723ac85f5d6a665c9f7 for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,236 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/eced3d5057fa4eaaba43f9a67d348594 for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,236 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322 for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,248 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/805459ed1d5d492e8c1f6e5de6bab2fc, top=true 2024-11-13T11:30:20,248 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f5a5d31a6fb84723ac85f5d6a665c9f7, top=true 2024-11-13T11:30:20,255 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/eced3d5057fa4eaaba43f9a67d348594, top=true 2024-11-13T11:30:20,261 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc for child: 5e76b6ca748a25342e27876a7fc094c8, parent: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,261 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/805459ed1d5d492e8c1f6e5de6bab2fc for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741847_1023 (size=27) 2024-11-13T11:30:20,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741847_1023 (size=27) 2024-11-13T11:30:20,267 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7 for child: 5e76b6ca748a25342e27876a7fc094c8, parent: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,267 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/f5a5d31a6fb84723ac85f5d6a665c9f7 for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,269 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594 for child: 5e76b6ca748a25342e27876a7fc094c8, parent: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,269 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/eced3d5057fa4eaaba43f9a67d348594 for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741848_1024 (size=27) 2024-11-13T11:30:20,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741848_1024 (size=27) 2024-11-13T11:30:20,278 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322 for region: fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:30:20,280 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region fc299128ef471b6596e90fdba7a5058f Daughter A: [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f] storefiles, Daughter B: [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7] storefiles. 
2024-11-13T11:30:20,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741849_1025 (size=71) 2024-11-13T11:30:20,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741849_1025 (size=71) 2024-11-13T11:30:20,291 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:30:20,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741850_1026 (size=71) 2024-11-13T11:30:20,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741850_1026 (size=71) 2024-11-13T11:30:20,305 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:30:20,317 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-13T11:30:20,319 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-13T11:30:20,322 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731497420322"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731497420322"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731497420322"}]},"ts":"1731497420322"} 2024-11-13T11:30:20,322 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731497420322"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497420322"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731497420322"}]},"ts":"1731497420322"} 2024-11-13T11:30:20,322 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731497420322"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731497420322"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731497420322"}]},"ts":"1731497420322"} 2024-11-13T11:30:20,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=343252d4dd2ed97f4f7c754befd7d1d4, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=5e76b6ca748a25342e27876a7fc094c8, ASSIGN}] 2024-11-13T11:30:20,341 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e76b6ca748a25342e27876a7fc094c8, ASSIGN 2024-11-13T11:30:20,341 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=343252d4dd2ed97f4f7c754befd7d1d4, ASSIGN 2024-11-13T11:30:20,342 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=343252d4dd2ed97f4f7c754befd7d1d4, ASSIGN; state=SPLITTING_NEW, location=7bf281cf3991,38217,1731497394782; forceNewPlan=false, retain=false 2024-11-13T11:30:20,342 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e76b6ca748a25342e27876a7fc094c8, ASSIGN; state=SPLITTING_NEW, location=7bf281cf3991,38217,1731497394782; forceNewPlan=false, retain=false 2024-11-13T11:30:20,494 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=5e76b6ca748a25342e27876a7fc094c8, regionState=OPENING, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:20,494 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=343252d4dd2ed97f4f7c754befd7d1d4, regionState=OPENING, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:20,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e76b6ca748a25342e27876a7fc094c8, ASSIGN because future has completed 2024-11-13T11:30:20,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782}] 2024-11-13T11:30:20,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=343252d4dd2ed97f4f7c754befd7d1d4, ASSIGN because future has completed 2024-11-13T11:30:20,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 343252d4dd2ed97f4f7c754befd7d1d4, server=7bf281cf3991,38217,1731497394782}] 2024-11-13T11:30:20,661 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 
2024-11-13T11:30:20,661 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 343252d4dd2ed97f4f7c754befd7d1d4, NAME => 'TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-13T11:30:20,662 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,662 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:30:20,662 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,662 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,664 INFO [StoreOpener-343252d4dd2ed97f4f7c754befd7d1d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,665 INFO [StoreOpener-343252d4dd2ed97f4f7c754befd7d1d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 343252d4dd2ed97f4f7c754befd7d1d4 columnFamilyName info 2024-11-13T11:30:20,665 DEBUG [StoreOpener-343252d4dd2ed97f4f7c754befd7d1d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:30:20,677 DEBUG [StoreOpener-343252d4dd2ed97f4f7c754befd7d1d4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f->hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322-bottom 2024-11-13T11:30:20,678 INFO [StoreOpener-343252d4dd2ed97f4f7c754befd7d1d4-1 {}] regionserver.HStore(327): Store=343252d4dd2ed97f4f7c754befd7d1d4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:30:20,678 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,679 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,680 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,680 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,680 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,682 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,683 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 343252d4dd2ed97f4f7c754befd7d1d4; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690976, jitterRate=-0.12137894332408905}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:30:20,683 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:30:20,683 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 343252d4dd2ed97f4f7c754befd7d1d4: Running coprocessor pre-open hook at 1731497420662Writing region info on filesystem at 1731497420662Initializing all the Stores at 1731497420663 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497420664 (+1 ms)Cleaning up temporary data from old regions at 1731497420680 (+16 ms)Running coprocessor post-open hooks at 1731497420683 (+3 ms)Region opened successfully at 1731497420683 2024-11-13T11:30:20,684 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4., pid=13, masterSystemTime=1731497420653 2024-11-13T11:30:20,684 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for 
store 343252d4dd2ed97f4f7c754befd7d1d4:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:30:20,684 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-13T11:30:20,684 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:20,685 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:30:20,685 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 343252d4dd2ed97f4f7c754befd7d1d4/info is initiating minor compaction (all files) 2024-11-13T11:30:20,685 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 343252d4dd2ed97f4f7c754befd7d1d4/info in TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:30:20,685 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f->hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322-bottom] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/.tmp, totalSize=75.7 K 2024-11-13T11:30:20,686 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1731497405686 2024-11-13T11:30:20,686 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:30:20,686 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:30:20,687 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 
2024-11-13T11:30:20,687 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 5e76b6ca748a25342e27876a7fc094c8, NAME => 'TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-13T11:30:20,687 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,687 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=343252d4dd2ed97f4f7c754befd7d1d4, regionState=OPEN, openSeqNum=127, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:20,687 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:30:20,687 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,687 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,689 INFO [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,689 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-13T11:30:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-13T11:30:20,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 343252d4dd2ed97f4f7c754befd7d1d4, server=7bf281cf3991,38217,1731497394782 because future has completed 2024-11-13T11:30:20,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-13T11:30:20,690 INFO [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e76b6ca748a25342e27876a7fc094c8 columnFamilyName info 2024-11-13T11:30:20,690 DEBUG [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:30:20,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-13T11:30:20,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 343252d4dd2ed97f4f7c754befd7d1d4, server=7bf281cf3991,38217,1731497394782 in 189 msec 2024-11-13T11:30:20,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=343252d4dd2ed97f4f7c754befd7d1d4, ASSIGN in 355 msec 2024-11-13T11:30:20,708 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 343252d4dd2ed97f4f7c754befd7d1d4#info#compaction#64 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:20,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/info/048f06dff3e842dea5887a65259776af is 193, key is TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8./info:regioninfo/1731497420493/Put/seqid=0 2024-11-13T11:30:20,708 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/.tmp/info/adb5cce60a5f4c9a9e5f36407e9d3689 is 1080, key is row0001/info:/1731497405686/Put/seqid=0 2024-11-13T11:30:20,711 DEBUG [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f->hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322-top 2024-11-13T11:30:20,716 DEBUG [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc 2024-11-13T11:30:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741852_1028 (size=9847) 2024-11-13T11:30:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741852_1028 (size=9847) 2024-11-13T11:30:20,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/info/048f06dff3e842dea5887a65259776af 2024-11-13T11:30:20,721 DEBUG [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594 2024-11-13T11:30:20,726 DEBUG [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7 2024-11-13T11:30:20,726 INFO [StoreOpener-5e76b6ca748a25342e27876a7fc094c8-1 {}] regionserver.HStore(327): Store=5e76b6ca748a25342e27876a7fc094c8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:30:20,726 DEBUG 
[RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741851_1027 (size=70862) 2024-11-13T11:30:20,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741851_1027 (size=70862) 2024-11-13T11:30:20,727 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,728 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,729 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,729 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,731 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,732 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 5e76b6ca748a25342e27876a7fc094c8; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690957, jitterRate=-0.12140357494354248}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T11:30:20,732 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:20,732 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 5e76b6ca748a25342e27876a7fc094c8: Running coprocessor pre-open hook at 1731497420688Writing region info on filesystem at 1731497420688Initializing all the Stores at 1731497420688Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497420689 (+1 ms)Cleaning up temporary data from old regions at 1731497420729 (+40 ms)Running coprocessor post-open hooks at 1731497420732 (+3 ms)Region opened successfully at 1731497420732 2024-11-13T11:30:20,733 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for 
TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., pid=12, masterSystemTime=1731497420653 2024-11-13T11:30:20,733 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/.tmp/info/adb5cce60a5f4c9a9e5f36407e9d3689 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/adb5cce60a5f4c9a9e5f36407e9d3689 2024-11-13T11:30:20,733 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 2 2024-11-13T11:30:20,733 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:20,733 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-13T11:30:20,734 INFO [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:20,734 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:30:20,734 INFO [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 
2024-11-13T11:30:20,734 INFO [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f->hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322-top, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=120.8 K 2024-11-13T11:30:20,735 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] compactions.Compactor(225): Compacting 4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731497405686 2024-11-13T11:30:20,735 DEBUG [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:20,735 INFO [RS_OPEN_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 
2024-11-13T11:30:20,735 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731497419914 2024-11-13T11:30:20,736 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1731497419938 2024-11-13T11:30:20,736 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=5e76b6ca748a25342e27876a7fc094c8, regionState=OPEN, openSeqNum=127, regionLocation=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:20,736 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731497419971 2024-11-13T11:30:20,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 because future has completed 2024-11-13T11:30:20,740 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 343252d4dd2ed97f4f7c754befd7d1d4/info of 343252d4dd2ed97f4f7c754befd7d1d4 into adb5cce60a5f4c9a9e5f36407e9d3689(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T11:30:20,740 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 343252d4dd2ed97f4f7c754befd7d1d4: 2024-11-13T11:30:20,740 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4., storeName=343252d4dd2ed97f4f7c754befd7d1d4/info, priority=15, startTime=1731497420684; duration=0sec 2024-11-13T11:30:20,740 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:20,740 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 343252d4dd2ed97f4f7c754befd7d1d4:info 2024-11-13T11:30:20,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/ns/7fe5dad4e83546b8a87a64ad5c557f6b is 43, key is default/ns:d/1731497395567/Put/seqid=0 2024-11-13T11:30:20,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-13T11:30:20,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 in 240 msec 2024-11-13T11:30:20,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-13T11:30:20,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e76b6ca748a25342e27876a7fc094c8, ASSIGN in 403 msec 2024-11-13T11:30:20,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fc299128ef471b6596e90fdba7a5058f, daughterA=343252d4dd2ed97f4f7c754befd7d1d4, daughterB=5e76b6ca748a25342e27876a7fc094c8 in 761 msec 2024-11-13T11:30:20,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741853_1029 (size=5153) 2024-11-13T11:30:20,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741853_1029 (size=5153) 2024-11-13T11:30:20,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/ns/7fe5dad4e83546b8a87a64ad5c557f6b 2024-11-13T11:30:20,770 INFO [RS:0;7bf281cf3991:38217-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#67 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:20,771 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/ba41fc2c85a44bf9a1c4d26cc422ff32 is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:30:20,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741854_1030 (size=43081) 2024-11-13T11:30:20,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741854_1030 (size=43081) 2024-11-13T11:30:20,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/table/bf14ec841c5d4bf2b5a8c06460527888 is 65, key is TestLogRolling-testLogRolling/table:state/1731497395991/Put/seqid=0 2024-11-13T11:30:20,781 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/ba41fc2c85a44bf9a1c4d26cc422ff32 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ba41fc2c85a44bf9a1c4d26cc422ff32 2024-11-13T11:30:20,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741855_1031 (size=5340) 2024-11-13T11:30:20,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741855_1031 (size=5340) 2024-11-13T11:30:20,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/table/bf14ec841c5d4bf2b5a8c06460527888 2024-11-13T11:30:20,787 INFO [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into ba41fc2c85a44bf9a1c4d26cc422ff32(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T11:30:20,787 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:20,787 INFO [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=12, startTime=1731497420733; duration=0sec 2024-11-13T11:30:20,787 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:20,787 DEBUG [RS:0;7bf281cf3991:38217-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:30:20,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/info/048f06dff3e842dea5887a65259776af as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/info/048f06dff3e842dea5887a65259776af 2024-11-13T11:30:20,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/info/048f06dff3e842dea5887a65259776af, entries=30, sequenceid=17, filesize=9.6 K 2024-11-13T11:30:20,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/ns/7fe5dad4e83546b8a87a64ad5c557f6b as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/ns/7fe5dad4e83546b8a87a64ad5c557f6b 2024-11-13T11:30:20,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/ns/7fe5dad4e83546b8a87a64ad5c557f6b, entries=2, sequenceid=17, filesize=5.0 K 2024-11-13T11:30:20,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/table/bf14ec841c5d4bf2b5a8c06460527888 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/table/bf14ec841c5d4bf2b5a8c06460527888 2024-11-13T11:30:20,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/table/bf14ec841c5d4bf2b5a8c06460527888, entries=2, sequenceid=17, filesize=5.2 K 2024-11-13T11:30:20,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:20,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:20,810 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 119ms, sequenceid=17, compaction requested=false 2024-11-13T11:30:20,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T11:30:21,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:21,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:21,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44852 deadline: 1731497431991, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. is not online on 7bf281cf3991,38217,1731497394782 2024-11-13T11:30:21,995 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. 
is not online on 7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T11:30:21,995 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f. is not online on 7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T11:30:21,995 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731497395613.fc299128ef471b6596e90fdba7a5058f., hostname=7bf281cf3991,38217,1731497394782, seqNum=2 from cache 2024-11-13T11:30:22,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:22,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:23,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:23,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:24,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T11:30:24,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:24,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:25,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,749 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T11:30:25,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T11:30:25,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:25,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:26,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:26,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:27,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:27,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:28,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:28,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:29,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:29,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:30,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:30,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:31,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:31,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:32,087 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127] 2024-11-13T11:30:32,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:32,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:30:32,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/03baa6362aa24bc08fef1dcb65162d50 is 1080, key is row0097/info:/1731497432088/Put/seqid=0 2024-11-13T11:30:32,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741856_1032 (size=12516) 2024-11-13T11:30:32,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741856_1032 (size=12516) 2024-11-13T11:30:32,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/03baa6362aa24bc08fef1dcb65162d50 2024-11-13T11:30:32,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/03baa6362aa24bc08fef1dcb65162d50 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/03baa6362aa24bc08fef1dcb65162d50 2024-11-13T11:30:32,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/03baa6362aa24bc08fef1dcb65162d50, entries=7, sequenceid=137, filesize=12.2 K 2024-11-13T11:30:32,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 5e76b6ca748a25342e27876a7fc094c8 in 22ms, sequenceid=137, compaction requested=false 2024-11-13T11:30:32,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 
2024-11-13T11:30:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:32,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T11:30:32,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a01fce08f6214f53807d667d8411de84 is 1080, key is row0104/info:/1731497432099/Put/seqid=0 2024-11-13T11:30:32,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741857_1033 (size=17906) 2024-11-13T11:30:32,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741857_1033 (size=17906) 2024-11-13T11:30:32,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a01fce08f6214f53807d667d8411de84 2024-11-13T11:30:32,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a01fce08f6214f53807d667d8411de84 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a01fce08f6214f53807d667d8411de84 2024-11-13T11:30:32,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a01fce08f6214f53807d667d8411de84, entries=12, sequenceid=152, filesize=17.5 K 2024-11-13T11:30:32,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 5e76b6ca748a25342e27876a7fc094c8 in 24ms, sequenceid=152, compaction requested=true 2024-11-13T11:30:32,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:32,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:30:32,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:32,145 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:30:32,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:32,146 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-13T11:30:32,146 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:30:32,146 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:30:32,146 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:32,146 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ba41fc2c85a44bf9a1c4d26cc422ff32, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/03baa6362aa24bc08fef1dcb65162d50, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a01fce08f6214f53807d667d8411de84] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=71.8 K 2024-11-13T11:30:32,147 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba41fc2c85a44bf9a1c4d26cc422ff32, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731497417893 2024-11-13T11:30:32,147 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 03baa6362aa24bc08fef1dcb65162d50, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731497432088 2024-11-13T11:30:32,147 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting a01fce08f6214f53807d667d8411de84, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731497432099 2024-11-13T11:30:32,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/ac11202283e5466aa96a307126a00570 is 1080, key is row0116/info:/1731497432123/Put/seqid=0 2024-11-13T11:30:32,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741858_1034 (size=16828) 2024-11-13T11:30:32,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741858_1034 (size=16828) 2024-11-13T11:30:32,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=166 (bloomFilter=true), 
to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/ac11202283e5466aa96a307126a00570 2024-11-13T11:30:32,162 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#72 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:32,163 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/df1ee93f8fe54c2ea8b47d9125a6b0c4 is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:30:32,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/ac11202283e5466aa96a307126a00570 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ac11202283e5466aa96a307126a00570 2024-11-13T11:30:32,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741859_1035 (size=63733) 2024-11-13T11:30:32,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741859_1035 (size=63733) 2024-11-13T11:30:32,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ac11202283e5466aa96a307126a00570, entries=11, sequenceid=166, filesize=16.4 K 2024-11-13T11:30:32,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 5e76b6ca748a25342e27876a7fc094c8 in 24ms, sequenceid=166, compaction requested=false 2024-11-13T11:30:32,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:32,174 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/df1ee93f8fe54c2ea8b47d9125a6b0c4 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/df1ee93f8fe54c2ea8b47d9125a6b0c4 2024-11-13T11:30:32,180 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into df1ee93f8fe54c2ea8b47d9125a6b0c4(size=62.2 K), total size for store is 78.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T11:30:32,180 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:32,181 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=13, startTime=1731497432145; duration=0sec 2024-11-13T11:30:32,181 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:32,181 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:30:32,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:32,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:33,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:33,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:34,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:34,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:30:34,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/39c845e1ecc5478ebb0af9f0be29f130 is 1080, key is row0127/info:/1731497432147/Put/seqid=0 2024-11-13T11:30:34,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741860_1036 (size=12516) 2024-11-13T11:30:34,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741860_1036 (size=12516) 2024-11-13T11:30:34,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T11:30:34,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44852 deadline: 1731497444204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:34,206 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T11:30:34,206 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T11:30:34,206 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127 because the exception is null or not the one we care about 2024-11-13T11:30:34,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/39c845e1ecc5478ebb0af9f0be29f130 2024-11-13T11:30:34,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/39c845e1ecc5478ebb0af9f0be29f130 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/39c845e1ecc5478ebb0af9f0be29f130 2024-11-13T11:30:34,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/39c845e1ecc5478ebb0af9f0be29f130, entries=7, sequenceid=177, filesize=12.2 K 2024-11-13T11:30:34,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 5e76b6ca748a25342e27876a7fc094c8 in 422ms, sequenceid=177, compaction requested=true 2024-11-13T11:30:34,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:34,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:30:34,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:34,582 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:30:34,584 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:30:34,584 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:30:34,584 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:34,584 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/df1ee93f8fe54c2ea8b47d9125a6b0c4, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ac11202283e5466aa96a307126a00570, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/39c845e1ecc5478ebb0af9f0be29f130] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=90.9 K 2024-11-13T11:30:34,584 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting df1ee93f8fe54c2ea8b47d9125a6b0c4, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731497417893 2024-11-13T11:30:34,584 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting ac11202283e5466aa96a307126a00570, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731497432123 2024-11-13T11:30:34,585 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 39c845e1ecc5478ebb0af9f0be29f130, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731497432147 2024-11-13T11:30:34,595 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#74 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:34,596 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/3498726d2dfb4d46b67757591f7fd68a is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:30:34,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741861_1037 (size=83312) 2024-11-13T11:30:34,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741861_1037 (size=83312) 2024-11-13T11:30:34,606 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/3498726d2dfb4d46b67757591f7fd68a as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/3498726d2dfb4d46b67757591f7fd68a 2024-11-13T11:30:34,611 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into 3498726d2dfb4d46b67757591f7fd68a(size=81.4 K), total size for store is 81.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T11:30:34,612 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:34,612 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=13, startTime=1731497434582; duration=0sec 2024-11-13T11:30:34,612 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:34,612 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:30:34,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:34,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:35,581 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T11:30:35,581 INFO [master/7bf281cf3991:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T11:30:35,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:35,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:36,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:36,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:37,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:37,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:38,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:38,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:39,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:39,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:40,536 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-13T11:30:40,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:40,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:41,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:41,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:42,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:42,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:43,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:43,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:44,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:44,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-13T11:30:44,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/899f96c772e945d1be4a302645eed1d1 is 1080, key is row0134/info:/1731497434162/Put/seqid=0 2024-11-13T11:30:44,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741862_1038 (size=29784) 2024-11-13T11:30:44,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741862_1038 (size=29784) 2024-11-13T11:30:44,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/899f96c772e945d1be4a302645eed1d1 2024-11-13T11:30:44,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/899f96c772e945d1be4a302645eed1d1 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/899f96c772e945d1be4a302645eed1d1 2024-11-13T11:30:44,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/899f96c772e945d1be4a302645eed1d1, entries=23, sequenceid=204, filesize=29.1 K 2024-11-13T11:30:44,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for 5e76b6ca748a25342e27876a7fc094c8 in 32ms, sequenceid=204, compaction requested=false 2024-11-13T11:30:44,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:44,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:44,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:45,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:45,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:30:46,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:46,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:30:46,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/9d13cda9a3bd4ad7bee2061205f53a85 is 1080, key is row0157/info:/1731497444253/Put/seqid=0 2024-11-13T11:30:46,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741863_1039 (size=12516) 2024-11-13T11:30:46,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741863_1039 (size=12516) 2024-11-13T11:30:46,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/9d13cda9a3bd4ad7bee2061205f53a85 2024-11-13T11:30:46,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/9d13cda9a3bd4ad7bee2061205f53a85 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/9d13cda9a3bd4ad7bee2061205f53a85 2024-11-13T11:30:46,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/9d13cda9a3bd4ad7bee2061205f53a85, entries=7, sequenceid=214, filesize=12.2 K 2024-11-13T11:30:46,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 5e76b6ca748a25342e27876a7fc094c8 in 29ms, sequenceid=214, compaction requested=true 2024-11-13T11:30:46,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:46,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:30:46,298 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:30:46,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:46,299 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 125612 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-13T11:30:46,299 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:30:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:46,299 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:46,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T11:30:46,299 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/3498726d2dfb4d46b67757591f7fd68a, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/899f96c772e945d1be4a302645eed1d1, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/9d13cda9a3bd4ad7bee2061205f53a85] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=122.7 K 2024-11-13T11:30:46,299 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3498726d2dfb4d46b67757591f7fd68a, keycount=72, bloomtype=ROW, size=81.4 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731497417893 2024-11-13T11:30:46,300 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 899f96c772e945d1be4a302645eed1d1, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731497434162 2024-11-13T11:30:46,300 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d13cda9a3bd4ad7bee2061205f53a85, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731497444253 2024-11-13T11:30:46,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/51187e41368045169f93948bc343c904 is 1080, key is row0164/info:/1731497446270/Put/seqid=0 2024-11-13T11:30:46,321 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#78 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:46,321 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a2631fbda2e94ee0b64a03f1db222ae8 is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:30:46,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741864_1040 (size=20078) 2024-11-13T11:30:46,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741864_1040 (size=20078) 2024-11-13T11:30:46,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/51187e41368045169f93948bc343c904 2024-11-13T11:30:46,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/51187e41368045169f93948bc343c904 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/51187e41368045169f93948bc343c904 2024-11-13T11:30:46,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741865_1041 (size=115762) 2024-11-13T11:30:46,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/51187e41368045169f93948bc343c904, entries=14, sequenceid=231, filesize=19.6 K 2024-11-13T11:30:46,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 5e76b6ca748a25342e27876a7fc094c8 in 56ms, sequenceid=231, compaction requested=false 2024-11-13T11:30:46,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:46,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741865_1041 (size=115762) 2024-11-13T11:30:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:46,359 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T11:30:46,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/550b4390664e4aafb76ed4a5c031136c is 1080, key is row0178/info:/1731497446301/Put/seqid=0 2024-11-13T11:30:46,366 DEBUG 
[RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a2631fbda2e94ee0b64a03f1db222ae8 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a2631fbda2e94ee0b64a03f1db222ae8 2024-11-13T11:30:46,374 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into a2631fbda2e94ee0b64a03f1db222ae8(size=113.0 K), total size for store is 132.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T11:30:46,374 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:46,374 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=13, startTime=1731497446297; duration=0sec 2024-11-13T11:30:46,374 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:46,375 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:30:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741866_1042 (size=19000) 2024-11-13T11:30:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741866_1042 (size=19000) 2024-11-13T11:30:46,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/550b4390664e4aafb76ed4a5c031136c 2024-11-13T11:30:46,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/550b4390664e4aafb76ed4a5c031136c as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/550b4390664e4aafb76ed4a5c031136c 2024-11-13T11:30:46,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/550b4390664e4aafb76ed4a5c031136c, entries=13, sequenceid=247, filesize=18.6 K 2024-11-13T11:30:46,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=2.10 KB/2152 for 
5e76b6ca748a25342e27876a7fc094c8 in 29ms, sequenceid=247, compaction requested=true 2024-11-13T11:30:46,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:46,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:30:46,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:46,389 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:30:46,390 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 154840 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:30:46,390 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:30:46,390 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:46,390 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a2631fbda2e94ee0b64a03f1db222ae8, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/51187e41368045169f93948bc343c904, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/550b4390664e4aafb76ed4a5c031136c] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=151.2 K 2024-11-13T11:30:46,390 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2631fbda2e94ee0b64a03f1db222ae8, keycount=102, bloomtype=ROW, size=113.0 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731497417893 2024-11-13T11:30:46,391 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 51187e41368045169f93948bc343c904, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1731497446270 2024-11-13T11:30:46,391 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 550b4390664e4aafb76ed4a5c031136c, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1731497446301 2024-11-13T11:30:46,405 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#80 average throughput is 44.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:46,405 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/f73c194a7be44098a18b3651714d90d8 is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:30:46,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741867_1043 (size=145175) 2024-11-13T11:30:46,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741867_1043 (size=145175) 2024-11-13T11:30:46,418 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/f73c194a7be44098a18b3651714d90d8 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f73c194a7be44098a18b3651714d90d8 2024-11-13T11:30:46,428 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into f73c194a7be44098a18b3651714d90d8(size=141.8 K), total size for store is 141.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T11:30:46,428 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:46,428 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=13, startTime=1731497446389; duration=0sec 2024-11-13T11:30:46,428 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:46,429 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:30:46,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:46,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:47,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:47,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:48,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:48,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:30:48,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/0e36b3e7049a4c7bb4e47b2349663133 is 1080, key is row0191/info:/1731497446361/Put/seqid=0 2024-11-13T11:30:48,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741868_1044 (size=12519) 2024-11-13T11:30:48,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741868_1044 (size=12519) 2024-11-13T11:30:48,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T11:30:48,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44852 deadline: 1731497458415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 2024-11-13T11:30:48,416 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T11:30:48,416 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=5e76b6ca748a25342e27876a7fc094c8, server=7bf281cf3991,38217,1731497394782 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T11:30:48,416 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., hostname=7bf281cf3991,38217,1731497394782, seqNum=127 because the exception is null or not the one we care about 2024-11-13T11:30:48,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/0e36b3e7049a4c7bb4e47b2349663133 2024-11-13T11:30:48,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/0e36b3e7049a4c7bb4e47b2349663133 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/0e36b3e7049a4c7bb4e47b2349663133 2024-11-13T11:30:48,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/0e36b3e7049a4c7bb4e47b2349663133, entries=7, sequenceid=259, filesize=12.2 K 2024-11-13T11:30:48,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 5e76b6ca748a25342e27876a7fc094c8 in 424ms, sequenceid=259, compaction requested=false 2024-11-13T11:30:48,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:48,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:48,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:49,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:49,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:50,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:50,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:51,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:51,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:52,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:52,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:53,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:53,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:54,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T11:30:54,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:54,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:55,663 DEBUG [master/7bf281cf3991:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-13T11:30:55,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:55,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:56,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:56,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:57,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:57,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:58,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:30:58,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-13T11:30:58,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/b728cd31f39149b8ad466db46f7f97f3 is 1080, key is row0198/info:/1731497448377/Put/seqid=0 2024-11-13T11:30:58,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741869_1045 (size=29807) 2024-11-13T11:30:58,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741869_1045 (size=29807) 2024-11-13T11:30:58,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/b728cd31f39149b8ad466db46f7f97f3 2024-11-13T11:30:58,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/b728cd31f39149b8ad466db46f7f97f3 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/b728cd31f39149b8ad466db46f7f97f3 2024-11-13T11:30:58,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/b728cd31f39149b8ad466db46f7f97f3, entries=23, sequenceid=285, filesize=29.1 K 2024-11-13T11:30:58,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for 5e76b6ca748a25342e27876a7fc094c8 in 30ms, sequenceid=285, compaction requested=true 2024-11-13T11:30:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:30:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:58,535 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:30:58,536 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 187501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:30:58,536 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:30:58,536 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:30:58,536 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f73c194a7be44098a18b3651714d90d8, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/0e36b3e7049a4c7bb4e47b2349663133, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/b728cd31f39149b8ad466db46f7f97f3] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=183.1 K 2024-11-13T11:30:58,536 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting f73c194a7be44098a18b3651714d90d8, keycount=129, bloomtype=ROW, size=141.8 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1731497417893 2024-11-13T11:30:58,537 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0e36b3e7049a4c7bb4e47b2349663133, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1731497446361 2024-11-13T11:30:58,537 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting b728cd31f39149b8ad466db46f7f97f3, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731497448377 2024-11-13T11:30:58,547 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#83 average throughput is 81.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:30:58,548 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/dfbd71bc1f3f440a8e865ab916d052e4 is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:30:58,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741870_1046 (size=177651) 2024-11-13T11:30:58,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741870_1046 (size=177651) 2024-11-13T11:30:58,555 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/dfbd71bc1f3f440a8e865ab916d052e4 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/dfbd71bc1f3f440a8e865ab916d052e4 2024-11-13T11:30:58,561 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into dfbd71bc1f3f440a8e865ab916d052e4(size=173.5 K), total size for store is 173.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T11:30:58,561 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:30:58,561 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=13, startTime=1731497458535; duration=0sec 2024-11-13T11:30:58,561 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:30:58,561 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:30:58,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:58,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:59,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:30:59,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:31:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:31:00,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T11:31:00,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/f6801a811f9849e0bb6549d17652db60 is 1080, key is row0221/info:/1731497458509/Put/seqid=0 2024-11-13T11:31:00,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741871_1047 (size=12523) 2024-11-13T11:31:00,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741871_1047 (size=12523) 2024-11-13T11:31:00,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/f6801a811f9849e0bb6549d17652db60 2024-11-13T11:31:00,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/f6801a811f9849e0bb6549d17652db60 as 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f6801a811f9849e0bb6549d17652db60 2024-11-13T11:31:00,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f6801a811f9849e0bb6549d17652db60, entries=7, sequenceid=296, filesize=12.2 K 2024-11-13T11:31:00,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 5e76b6ca748a25342e27876a7fc094c8 in 25ms, sequenceid=296, compaction requested=false 2024-11-13T11:31:00,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:31:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:31:00,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T11:31:00,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a5d613c9cad746ac8554d118804fa125 is 1080, key is row0228/info:/1731497460524/Put/seqid=0 2024-11-13T11:31:00,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741872_1048 (size=19013) 2024-11-13T11:31:00,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741872_1048 (size=19013) 2024-11-13T11:31:00,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a5d613c9cad746ac8554d118804fa125 2024-11-13T11:31:00,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/a5d613c9cad746ac8554d118804fa125 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a5d613c9cad746ac8554d118804fa125 2024-11-13T11:31:00,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a5d613c9cad746ac8554d118804fa125, entries=13, sequenceid=312, filesize=18.6 K 2024-11-13T11:31:00,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 5e76b6ca748a25342e27876a7fc094c8 in 20ms, sequenceid=312, compaction requested=true 2024-11-13T11:31:00,569 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:31:00,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e76b6ca748a25342e27876a7fc094c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T11:31:00,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:31:00,569 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T11:31:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38217 {}] regionserver.HRegion(8855): Flush requested on 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:31:00,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-13T11:31:00,570 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209187 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T11:31:00,570 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1541): 5e76b6ca748a25342e27876a7fc094c8/info is initiating minor compaction (all files) 2024-11-13T11:31:00,570 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e76b6ca748a25342e27876a7fc094c8/info in TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:31:00,571 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/dfbd71bc1f3f440a8e865ab916d052e4, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f6801a811f9849e0bb6549d17652db60, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a5d613c9cad746ac8554d118804fa125] into tmpdir=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp, totalSize=204.3 K 2024-11-13T11:31:00,571 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting dfbd71bc1f3f440a8e865ab916d052e4, keycount=159, bloomtype=ROW, size=173.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731497417893 2024-11-13T11:31:00,571 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6801a811f9849e0bb6549d17652db60, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731497458509 2024-11-13T11:31:00,571 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5d613c9cad746ac8554d118804fa125, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1731497460524 2024-11-13T11:31:00,573 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/8775b623dc70472f932c0329410aa510 is 1080, key is row0241/info:/1731497460549/Put/seqid=0 2024-11-13T11:31:00,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741873_1049 (size=16839) 2024-11-13T11:31:00,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741873_1049 (size=16839) 2024-11-13T11:31:00,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/8775b623dc70472f932c0329410aa510 2024-11-13T11:31:00,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/8775b623dc70472f932c0329410aa510 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/8775b623dc70472f932c0329410aa510 2024-11-13T11:31:00,587 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e76b6ca748a25342e27876a7fc094c8#info#compaction#87 average throughput is 36.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T11:31:00,587 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/bb959a6aab364381800f7bf0e7be512c is 1080, key is row0062/info:/1731497417893/Put/seqid=0 2024-11-13T11:31:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741874_1050 (size=199337) 2024-11-13T11:31:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741874_1050 (size=199337) 2024-11-13T11:31:00,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/8775b623dc70472f932c0329410aa510, entries=11, sequenceid=326, filesize=16.4 K 2024-11-13T11:31:00,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for 5e76b6ca748a25342e27876a7fc094c8 in 21ms, sequenceid=326, compaction requested=false 2024-11-13T11:31:00,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:31:00,594 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/bb959a6aab364381800f7bf0e7be512c as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/bb959a6aab364381800f7bf0e7be512c 2024-11-13T11:31:00,600 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e76b6ca748a25342e27876a7fc094c8/info of 5e76b6ca748a25342e27876a7fc094c8 into bb959a6aab364381800f7bf0e7be512c(size=194.7 K), total size for store is 211.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
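The flush and compaction records above share one commit idiom: the new HFile is first written under the region's .tmp directory (the "Len of the biggest cell in .../.tmp/info/..." and "Flushed memstore data size=..." lines) and only afterwards moved into the column family's directory, which is what the "Committing <tmp path> as <final path>" lines record. Below is a minimal sketch of that tmp-then-rename step using the plain Hadoop FileSystem API; the class and method names are illustrative, not HBase's actual HRegionFileSystem code.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class StoreFileCommit {
      /**
       * Moves a freshly written HFile from the region's .tmp area into the
       * column-family directory, mirroring the "Committing <tmp> as <final>"
       * lines in the log. Illustrative sketch only.
       */
      static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        if (!fs.exists(tmpFile)) {
          throw new IOException("tmp store file does not exist: " + tmpFile);
        }
        if (!fs.exists(familyDir)) {
          fs.mkdirs(familyDir);
        }
        Path dst = new Path(familyDir, tmpFile.getName());
        // On HDFS the rename is a cheap metadata move, so the commit is a single step
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("rename failed: " + tmpFile + " -> " + dst);
        }
        return dst;
      }
    }

Writing into .tmp first means a crash mid-flush or mid-compaction leaves only leftovers under .tmp, never a partially written file in the live info/ directory, which is why the log can report "Added ... filesize=..." immediately after each commit line.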
2024-11-13T11:31:00,600 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:31:00,600 INFO [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., storeName=5e76b6ca748a25342e27876a7fc094c8/info, priority=13, startTime=1731497460569; duration=0sec 2024-11-13T11:31:00,600 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T11:31:00,600 DEBUG [RS:0;7bf281cf3991:38217-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e76b6ca748a25342e27876a7fc094c8:info 2024-11-13T11:31:00,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:31:00,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:31:01,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:31:01,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:31:02,581 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-13T11:31:02,582 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C38217%2C1731497394782.1731497462582 2024-11-13T11:31:02,603 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,603 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,603 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,603 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,603 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,604 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497395160 with entries=312, filesize=308.51 KB; new WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497462582 2024-11-13T11:31:02,604 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33035:33035),(127.0.0.1/127.0.0.1:46625:46625)] 2024-11-13T11:31:02,604 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497395160 is not closed yet, will try archiving it next time 2024-11-13T11:31:02,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741833_1009 (size=315921) 2024-11-13T11:31:02,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741833_1009 (size=315921) 2024-11-13T11:31:02,608 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5e76b6ca748a25342e27876a7fc094c8 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-13T11:31:02,612 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/d6a4040dbfb64373beb6b862b95ea5b3 is 1080, key is row0252/info:/1731497460571/Put/seqid=0 2024-11-13T11:31:02,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741876_1052 (size=10357) 2024-11-13T11:31:02,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741876_1052 (size=10357) 2024-11-13T11:31:02,618 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/d6a4040dbfb64373beb6b862b95ea5b3 2024-11-13T11:31:02,624 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/.tmp/info/d6a4040dbfb64373beb6b862b95ea5b3 as 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/d6a4040dbfb64373beb6b862b95ea5b3 2024-11-13T11:31:02,629 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/d6a4040dbfb64373beb6b862b95ea5b3, entries=5, sequenceid=335, filesize=10.1 K 2024-11-13T11:31:02,631 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 5e76b6ca748a25342e27876a7fc094c8 in 23ms, sequenceid=335, compaction requested=true 2024-11-13T11:31:02,631 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5e76b6ca748a25342e27876a7fc094c8: 2024-11-13T11:31:02,631 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-13T11:31:02,636 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/info/8dfdf269ddb14e4b88c53f37275ce329 is 193, key is TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8./info:regioninfo/1731497420736/Put/seqid=0 2024-11-13T11:31:02,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741877_1053 (size=6223) 2024-11-13T11:31:02,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741877_1053 (size=6223) 2024-11-13T11:31:02,641 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/info/8dfdf269ddb14e4b88c53f37275ce329 2024-11-13T11:31:02,646 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/.tmp/info/8dfdf269ddb14e4b88c53f37275ce329 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/info/8dfdf269ddb14e4b88c53f37275ce329 2024-11-13T11:31:02,651 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/info/8dfdf269ddb14e4b88c53f37275ce329, entries=5, sequenceid=21, filesize=6.1 K 2024-11-13T11:31:02,652 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-13T11:31:02,652 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T11:31:02,652 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 343252d4dd2ed97f4f7c754befd7d1d4: 2024-11-13T11:31:02,652 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C38217%2C1731497394782.1731497462652 2024-11-13T11:31:02,657 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,657 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,657 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,657 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,657 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,657 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497462582 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497462652 2024-11-13T11:31:02,658 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46625:46625),(127.0.0.1/127.0.0.1:33035:33035)] 2024-11-13T11:31:02,658 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497462582 is not closed yet, will try archiving it next time 2024-11-13T11:31:02,659 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497395160 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/oldWALs/7bf281cf3991%2C38217%2C1731497394782.1731497395160 2024-11-13T11:31:02,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741875_1051 (size=731) 2024-11-13T11:31:02,659 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T11:31:02,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741875_1051 (size=731) 2024-11-13T11:31:02,660 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/WALs/7bf281cf3991,38217,1731497394782/7bf281cf3991%2C38217%2C1731497394782.1731497462582 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/oldWALs/7bf281cf3991%2C38217%2C1731497394782.1731497462582 2024-11-13T11:31:02,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T11:31:02,760 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
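The roll records above ("Rolled WAL ... with entries=312, filesize=308.51 KB; new WAL ...", then "Archiving ... to ... oldWALs/...") show the two halves of a roll: a new WAL file named with the current timestamp is created and swapped in, and once a previous file is fully closed it is moved into the oldWALs directory. Below is a rough sketch of those two steps under the naming convention visible in the log (server prefix + "." + wall-clock millis); the helper class is illustrative, not the AbstractFSWAL implementation.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class WalRollSketch {
      /** Builds the next WAL name the way the log shows it: prefix + "." + current millis. */
      static Path nextWalPath(Path walDir, String serverPrefix, long nowMillis) {
        return new Path(walDir, serverPrefix + "." + nowMillis);
      }

      /** Moves a fully closed WAL file into the oldWALs directory, as the WAL-Archive thread logs. */
      static void archiveWal(FileSystem fs, Path closedWal, Path oldWalsDir) throws IOException {
        Path target = new Path(oldWalsDir, closedWal.getName());
        if (!fs.rename(closedWal, target)) {
          throw new IOException("could not archive " + closedWal + " to " + target);
        }
      }
    }

The "is not closed yet, will try archiving it next time" lines correspond to the case where the old writer still has outstanding syncs, so the archive step is deferred to a later roll.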
2024-11-13T11:31:02,760 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-13T11:31:02,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:31:02,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:31:02,761 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
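The "Call stack:" DEBUG record above is the connection documenting who closed it: on close() it captures the current thread's stack trace and logs the frames, so an unexpected shutdown in a test can be traced back to its caller (here AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster). A minimal sketch of that logging pattern with SLF4J; the class below is illustrative, not the AsyncConnectionImpl source.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class CloseStackLogger {
      private static final Logger LOG = LoggerFactory.getLogger(CloseStackLogger.class);

      /** Logs the stack of whoever triggered close(), mirroring the "Call stack:" DEBUG record. */
      static void logCloseCaller(String closedBy) {
        LOG.info("Connection has been closed by {}.", closedBy);
        if (LOG.isDebugEnabled()) {
          StringBuilder sb = new StringBuilder("Call stack:");
          for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
            sb.append(System.lineSeparator()).append("  at ").append(frame);
          }
          LOG.debug(sb.toString());
        }
      }
    }

Guarding the capture with isDebugEnabled keeps the stack-walk cost out of normal runs and only pays it when the log level shown here (DEBUG) is active.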
2024-11-13T11:31:02,761 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-13T11:31:02,761 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1103943411, stopped=false
2024-11-13T11:31:02,761 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,45517,1731497394742
2024-11-13T11:31:02,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-13T11:31:02,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-13T11:31:02,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T11:31:02,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T11:31:02,764 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-13T11:31:02,764 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-13T11:31:02,765 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-13T11:31:02,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T11:31:02,766 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-13T11:31:02,766 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-13T11:31:02,767 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,38217,1731497394782' *****
2024-11-13T11:31:02,767 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-13T11:31:02,768 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(3091): Received CLOSE for 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(3091): Received CLOSE for 343252d4dd2ed97f4f7c754befd7d1d4 2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,38217,1731497394782 2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:31:02,768 INFO [RS:0;7bf281cf3991:38217 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:38217. 2024-11-13T11:31:02,768 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5e76b6ca748a25342e27876a7fc094c8, disabling compactions & flushes 2024-11-13T11:31:02,768 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:31:02,768 DEBUG [RS:0;7bf281cf3991:38217 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:31:02,768 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:31:02,768 DEBUG [RS:0;7bf281cf3991:38217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:02,769 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. after waiting 0 ms 2024-11-13T11:31:02,769 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 
2024-11-13T11:31:02,769 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:31:02,769 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:31:02,769 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T11:31:02,769 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:31:02,769 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-13T11:31:02,769 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1325): Online Regions={5e76b6ca748a25342e27876a7fc094c8=TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8., 1588230740=hbase:meta,,1.1588230740, 343252d4dd2ed97f4f7c754befd7d1d4=TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.} 2024-11-13T11:31:02,769 DEBUG [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 343252d4dd2ed97f4f7c754befd7d1d4, 5e76b6ca748a25342e27876a7fc094c8 2024-11-13T11:31:02,769 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:31:02,769 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:31:02,769 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:31:02,769 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:31:02,769 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:31:02,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f->hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322-top, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ba41fc2c85a44bf9a1c4d26cc422ff32, 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/03baa6362aa24bc08fef1dcb65162d50, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/df1ee93f8fe54c2ea8b47d9125a6b0c4, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a01fce08f6214f53807d667d8411de84, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ac11202283e5466aa96a307126a00570, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/3498726d2dfb4d46b67757591f7fd68a, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/39c845e1ecc5478ebb0af9f0be29f130, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/899f96c772e945d1be4a302645eed1d1, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a2631fbda2e94ee0b64a03f1db222ae8, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/9d13cda9a3bd4ad7bee2061205f53a85, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/51187e41368045169f93948bc343c904, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f73c194a7be44098a18b3651714d90d8, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/550b4390664e4aafb76ed4a5c031136c, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/0e36b3e7049a4c7bb4e47b2349663133, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/dfbd71bc1f3f440a8e865ab916d052e4, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/b728cd31f39149b8ad466db46f7f97f3, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f6801a811f9849e0bb6549d17652db60, 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a5d613c9cad746ac8554d118804fa125] to archive 2024-11-13T11:31:02,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T11:31:02,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:31:02,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-805459ed1d5d492e8c1f6e5de6bab2fc 2024-11-13T11:31:02,774 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-13T11:31:02,775 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:31:02,775 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:31:02,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-f5a5d31a6fb84723ac85f5d6a665c9f7 2024-11-13T11:31:02,775 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497462769Running coprocessor pre-close hooks at 1731497462769Disabling compacts and flushes for region at 1731497462769Disabling writes for close at 
1731497462769Writing region close event to WAL at 1731497462771 (+2 ms)Running coprocessor post-close hooks at 1731497462775 (+4 ms)Closed at 1731497462775 2024-11-13T11:31:02,775 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T11:31:02,776 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ba41fc2c85a44bf9a1c4d26cc422ff32 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ba41fc2c85a44bf9a1c4d26cc422ff32 2024-11-13T11:31:02,777 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/TestLogRolling-testLogRolling=fc299128ef471b6596e90fdba7a5058f-eced3d5057fa4eaaba43f9a67d348594 2024-11-13T11:31:02,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/03baa6362aa24bc08fef1dcb65162d50 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/03baa6362aa24bc08fef1dcb65162d50 2024-11-13T11:31:02,779 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/df1ee93f8fe54c2ea8b47d9125a6b0c4 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/df1ee93f8fe54c2ea8b47d9125a6b0c4 2024-11-13T11:31:02,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a01fce08f6214f53807d667d8411de84 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a01fce08f6214f53807d667d8411de84 2024-11-13T11:31:02,781 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ac11202283e5466aa96a307126a00570 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/ac11202283e5466aa96a307126a00570 2024-11-13T11:31:02,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/3498726d2dfb4d46b67757591f7fd68a to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/3498726d2dfb4d46b67757591f7fd68a 2024-11-13T11:31:02,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/39c845e1ecc5478ebb0af9f0be29f130 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/39c845e1ecc5478ebb0af9f0be29f130 2024-11-13T11:31:02,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/899f96c772e945d1be4a302645eed1d1 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/899f96c772e945d1be4a302645eed1d1 2024-11-13T11:31:02,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a2631fbda2e94ee0b64a03f1db222ae8 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a2631fbda2e94ee0b64a03f1db222ae8 2024-11-13T11:31:02,787 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/9d13cda9a3bd4ad7bee2061205f53a85 to 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/9d13cda9a3bd4ad7bee2061205f53a85 2024-11-13T11:31:02,788 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/51187e41368045169f93948bc343c904 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/51187e41368045169f93948bc343c904 2024-11-13T11:31:02,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f73c194a7be44098a18b3651714d90d8 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f73c194a7be44098a18b3651714d90d8 2024-11-13T11:31:02,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/550b4390664e4aafb76ed4a5c031136c to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/550b4390664e4aafb76ed4a5c031136c 2024-11-13T11:31:02,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/0e36b3e7049a4c7bb4e47b2349663133 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/0e36b3e7049a4c7bb4e47b2349663133 2024-11-13T11:31:02,792 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/dfbd71bc1f3f440a8e865ab916d052e4 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/dfbd71bc1f3f440a8e865ab916d052e4 2024-11-13T11:31:02,793 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/b728cd31f39149b8ad466db46f7f97f3 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/b728cd31f39149b8ad466db46f7f97f3 2024-11-13T11:31:02,794 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f6801a811f9849e0bb6549d17652db60 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/f6801a811f9849e0bb6549d17652db60 2024-11-13T11:31:02,795 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a5d613c9cad746ac8554d118804fa125 to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/info/a5d613c9cad746ac8554d118804fa125 2024-11-13T11:31:02,796 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7bf281cf3991:45517 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T11:31:02,796 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ba41fc2c85a44bf9a1c4d26cc422ff32=43081, 03baa6362aa24bc08fef1dcb65162d50=12516, df1ee93f8fe54c2ea8b47d9125a6b0c4=63733, a01fce08f6214f53807d667d8411de84=17906, ac11202283e5466aa96a307126a00570=16828, 3498726d2dfb4d46b67757591f7fd68a=83312, 39c845e1ecc5478ebb0af9f0be29f130=12516, 899f96c772e945d1be4a302645eed1d1=29784, a2631fbda2e94ee0b64a03f1db222ae8=115762, 9d13cda9a3bd4ad7bee2061205f53a85=12516, 51187e41368045169f93948bc343c904=20078, f73c194a7be44098a18b3651714d90d8=145175, 550b4390664e4aafb76ed4a5c031136c=19000, 0e36b3e7049a4c7bb4e47b2349663133=12519, dfbd71bc1f3f440a8e865ab916d052e4=177651, b728cd31f39149b8ad466db46f7f97f3=29807, f6801a811f9849e0bb6549d17652db60=12523, a5d613c9cad746ac8554d118804fa125=19013] 2024-11-13T11:31:02,799 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/5e76b6ca748a25342e27876a7fc094c8/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-13T11:31:02,800 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:31:02,800 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5e76b6ca748a25342e27876a7fc094c8: Waiting for close lock at 1731497462768Running coprocessor pre-close hooks at 1731497462768Disabling compacts and flushes for region at 1731497462768Disabling writes for close at 1731497462769 (+1 ms)Writing region close event to WAL at 1731497462796 (+27 ms)Running coprocessor post-close hooks at 1731497462800 (+4 ms)Closed at 1731497462800 2024-11-13T11:31:02,800 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731497419985.5e76b6ca748a25342e27876a7fc094c8. 2024-11-13T11:31:02,800 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 343252d4dd2ed97f4f7c754befd7d1d4, disabling compactions & flushes 2024-11-13T11:31:02,800 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:31:02,800 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:31:02,800 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. after waiting 0 ms 2024-11-13T11:31:02,800 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 
2024-11-13T11:31:02,801 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f->hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/fc299128ef471b6596e90fdba7a5058f/info/4a7671e9b98a410894b741122d58b322-bottom] to archive 2024-11-13T11:31:02,802 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T11:31:02,803 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f to hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/archive/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/info/4a7671e9b98a410894b741122d58b322.fc299128ef471b6596e90fdba7a5058f 2024-11-13T11:31:02,803 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-13T11:31:02,806 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/data/default/TestLogRolling-testLogRolling/343252d4dd2ed97f4f7c754befd7d1d4/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-13T11:31:02,807 INFO [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:31:02,807 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 343252d4dd2ed97f4f7c754befd7d1d4: Waiting for close lock at 1731497462800Running coprocessor pre-close hooks at 1731497462800Disabling compacts and flushes for region at 1731497462800Disabling writes for close at 1731497462800Writing region close event to WAL at 1731497462803 (+3 ms)Running coprocessor post-close hooks at 1731497462807 (+4 ms)Closed at 1731497462807 2024-11-13T11:31:02,807 DEBUG [RS_CLOSE_REGION-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731497419985.343252d4dd2ed97f4f7c754befd7d1d4. 2024-11-13T11:31:02,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:31:02,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:31:02,969 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,38217,1731497394782; all regions closed. 2024-11-13T11:31:02,970 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,970 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,970 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,970 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,971 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741834_1010 (size=8107) 2024-11-13T11:31:02,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741834_1010 (size=8107) 2024-11-13T11:31:02,974 DEBUG [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/oldWALs 2024-11-13T11:31:02,974 INFO [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C38217%2C1731497394782.meta:.meta(num 1731497395527) 2024-11-13T11:31:02,974 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,974 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,974 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,974 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,975 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:02,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741878_1054 (size=780) 2024-11-13T11:31:02,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741878_1054 (size=780) 2024-11-13T11:31:02,978 DEBUG [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/oldWALs 2024-11-13T11:31:02,978 INFO [RS:0;7bf281cf3991:38217 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C38217%2C1731497394782:(num 1731497462652) 2024-11-13T11:31:02,978 DEBUG [RS:0;7bf281cf3991:38217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:02,978 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:31:02,978 INFO [RS:0;7bf281cf3991:38217 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:31:02,978 INFO [RS:0;7bf281cf3991:38217 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-13T11:31:02,978 INFO [RS:0;7bf281cf3991:38217 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:31:02,978 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:31:02,978 INFO [RS:0;7bf281cf3991:38217 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38217 2024-11-13T11:31:02,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,38217,1731497394782 2024-11-13T11:31:02,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:31:02,980 INFO [RS:0;7bf281cf3991:38217 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:31:02,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,38217,1731497394782] 2024-11-13T11:31:02,981 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,38217,1731497394782 already deleted, retry=false 2024-11-13T11:31:02,981 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,38217,1731497394782 expired; onlineServers=0 2024-11-13T11:31:02,981 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,45517,1731497394742' ***** 2024-11-13T11:31:02,981 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T11:31:02,981 INFO [M:0;7bf281cf3991:45517 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:31:02,981 INFO [M:0;7bf281cf3991:45517 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:31:02,981 DEBUG [M:0;7bf281cf3991:45517 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T11:31:02,981 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T11:31:02,981 DEBUG [M:0;7bf281cf3991:45517 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:31:02,981 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497394916 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497394916,5,FailOnTimeoutGroup] 2024-11-13T11:31:02,981 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497394916 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497394916,5,FailOnTimeoutGroup] 2024-11-13T11:31:02,981 INFO [M:0;7bf281cf3991:45517 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:31:02,981 INFO [M:0;7bf281cf3991:45517 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:31:02,982 DEBUG [M:0;7bf281cf3991:45517 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:31:02,982 INFO [M:0;7bf281cf3991:45517 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:31:02,982 INFO [M:0;7bf281cf3991:45517 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:31:02,982 INFO [M:0;7bf281cf3991:45517 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:31:02,982 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T11:31:02,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:31:02,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:02,982 DEBUG [M:0;7bf281cf3991:45517 {}] zookeeper.ZKUtil(347): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T11:31:02,982 WARN [M:0;7bf281cf3991:45517 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T11:31:02,983 INFO [M:0;7bf281cf3991:45517 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/.lastflushedseqids 2024-11-13T11:31:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741879_1055 (size=228) 2024-11-13T11:31:02,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741879_1055 (size=228) 2024-11-13T11:31:02,988 INFO [M:0;7bf281cf3991:45517 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:31:02,988 INFO [M:0;7bf281cf3991:45517 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:31:02,988 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:31:02,988 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:02,988 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:02,988 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:31:02,988 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:02,988 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-13T11:31:03,002 DEBUG [M:0;7bf281cf3991:45517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e257eb3cca0f485d8a7d605f1931e5e7 is 82, key is hbase:meta,,1/info:regioninfo/1731497395552/Put/seqid=0 2024-11-13T11:31:03,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741880_1056 (size=5672) 2024-11-13T11:31:03,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741880_1056 (size=5672) 2024-11-13T11:31:03,010 INFO [M:0;7bf281cf3991:45517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e257eb3cca0f485d8a7d605f1931e5e7 2024-11-13T11:31:03,030 INFO [regionserver/7bf281cf3991:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:31:03,031 DEBUG [M:0;7bf281cf3991:45517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9fed1ddd33124335b26c0b70e7d355aa is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731497395996/Put/seqid=0 2024-11-13T11:31:03,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741881_1057 (size=7091) 2024-11-13T11:31:03,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741881_1057 (size=7091) 2024-11-13T11:31:03,036 INFO [M:0;7bf281cf3991:45517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9fed1ddd33124335b26c0b70e7d355aa 2024-11-13T11:31:03,039 INFO [M:0;7bf281cf3991:45517 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9fed1ddd33124335b26c0b70e7d355aa 2024-11-13T11:31:03,053 DEBUG [M:0;7bf281cf3991:45517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6eadb09ded8749c089a452f3c563ef21 is 69, key is 7bf281cf3991,38217,1731497394782/rs:state/1731497395015/Put/seqid=0 2024-11-13T11:31:03,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741882_1058 (size=5156) 2024-11-13T11:31:03,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741882_1058 (size=5156) 2024-11-13T11:31:03,057 INFO [M:0;7bf281cf3991:45517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6eadb09ded8749c089a452f3c563ef21 2024-11-13T11:31:03,075 DEBUG [M:0;7bf281cf3991:45517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26a193ca62a34cefbb6599b53732e853 is 52, key is load_balancer_on/state:d/1731497395608/Put/seqid=0 2024-11-13T11:31:03,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741883_1059 (size=5056) 2024-11-13T11:31:03,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741883_1059 (size=5056) 2024-11-13T11:31:03,080 INFO [M:0;7bf281cf3991:45517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26a193ca62a34cefbb6599b53732e853 2024-11-13T11:31:03,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:03,081 INFO [RS:0;7bf281cf3991:38217 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:31:03,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38217-0x10038d8de920001, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:03,081 INFO [RS:0;7bf281cf3991:38217 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,38217,1731497394782; zookeeper connection closed. 
2024-11-13T11:31:03,081 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fb134e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fb134e 2024-11-13T11:31:03,081 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T11:31:03,084 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e257eb3cca0f485d8a7d605f1931e5e7 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e257eb3cca0f485d8a7d605f1931e5e7 2024-11-13T11:31:03,089 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e257eb3cca0f485d8a7d605f1931e5e7, entries=8, sequenceid=125, filesize=5.5 K 2024-11-13T11:31:03,090 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9fed1ddd33124335b26c0b70e7d355aa as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9fed1ddd33124335b26c0b70e7d355aa 2024-11-13T11:31:03,094 INFO [M:0;7bf281cf3991:45517 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9fed1ddd33124335b26c0b70e7d355aa 2024-11-13T11:31:03,094 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9fed1ddd33124335b26c0b70e7d355aa, entries=13, sequenceid=125, filesize=6.9 K 2024-11-13T11:31:03,095 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6eadb09ded8749c089a452f3c563ef21 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6eadb09ded8749c089a452f3c563ef21 2024-11-13T11:31:03,099 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6eadb09ded8749c089a452f3c563ef21, entries=1, sequenceid=125, filesize=5.0 K 2024-11-13T11:31:03,100 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26a193ca62a34cefbb6599b53732e853 as hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/26a193ca62a34cefbb6599b53732e853 2024-11-13T11:31:03,104 INFO [M:0;7bf281cf3991:45517 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45435/user/jenkins/test-data/3fe8b8aa-7b48-97d7-c36c-3c0bb408d2ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/26a193ca62a34cefbb6599b53732e853, entries=1, sequenceid=125, filesize=4.9 K 2024-11-13T11:31:03,105 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=125, compaction requested=false 2024-11-13T11:31:03,106 INFO [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:03,106 DEBUG [M:0;7bf281cf3991:45517 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497462988Disabling compacts and flushes for region at 1731497462988Disabling writes for close at 1731497462988Obtaining lock to block concurrent updates at 1731497462988Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497462988Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731497462989 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731497462989Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497462989Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497463002 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497463002Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497463016 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497463031 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497463031Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497463039 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497463052 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497463052Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497463061 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497463075 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497463075Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@197e3e03: reopening flushed file at 1731497463084 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c281737: reopening flushed file at 1731497463089 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73b6720e: reopening flushed file at 1731497463094 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4867fc61: reopening flushed file at 1731497463099 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=125, compaction requested=false at 1731497463105 (+6 ms)Writing region close event to WAL at 1731497463106 (+1 ms)Closed at 1731497463106 2024-11-13T11:31:03,106 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:03,106 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:03,106 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:03,106 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:03,106 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:03,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35381 is added to blk_1073741830_1006 (size=61332) 2024-11-13T11:31:03,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36099 is added to blk_1073741830_1006 (size=61332) 2024-11-13T11:31:03,109 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T11:31:03,109 INFO [M:0;7bf281cf3991:45517 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T11:31:03,109 INFO [M:0;7bf281cf3991:45517 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45517 2024-11-13T11:31:03,109 INFO [M:0;7bf281cf3991:45517 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:31:03,210 INFO [M:0;7bf281cf3991:45517 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:31:03,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:03,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45517-0x10038d8de920000, quorum=127.0.0.1:62626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:03,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fffa550{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:31:03,214 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a183da4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:31:03,214 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:31:03,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13da1053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:31:03,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a865b5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir/,STOPPED} 2024-11-13T11:31:03,216 WARN [BP-1554668404-172.17.0.2-1731497394136 heartbeating to localhost/127.0.0.1:45435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:31:03,216 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:31:03,216 WARN [BP-1554668404-172.17.0.2-1731497394136 heartbeating to localhost/127.0.0.1:45435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554668404-172.17.0.2-1731497394136 (Datanode Uuid 39141b50-6f68-4cdd-a4ff-e8286004802c) service to localhost/127.0.0.1:45435 2024-11-13T11:31:03,216 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:31:03,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data3/current/BP-1554668404-172.17.0.2-1731497394136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:03,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data4/current/BP-1554668404-172.17.0.2-1731497394136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:03,217 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:31:03,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5df62b4a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:31:03,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f41f884{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:31:03,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:31:03,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60993b69{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:31:03,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bd0bba8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir/,STOPPED} 2024-11-13T11:31:03,222 WARN [BP-1554668404-172.17.0.2-1731497394136 heartbeating to localhost/127.0.0.1:45435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:31:03,222 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:31:03,222 WARN [BP-1554668404-172.17.0.2-1731497394136 heartbeating to localhost/127.0.0.1:45435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1554668404-172.17.0.2-1731497394136 (Datanode Uuid 03d2a3d9-7807-4b68-8407-dd950d311662) service to localhost/127.0.0.1:45435 2024-11-13T11:31:03,222 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:31:03,223 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data1/current/BP-1554668404-172.17.0.2-1731497394136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:03,223 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/cluster_cb21362e-aa2c-e7f9-4e70-7396e0fd05eb/data/data2/current/BP-1554668404-172.17.0.2-1731497394136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:03,223 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:31:03,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@928ae22{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:31:03,228 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ab7d584{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:31:03,228 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:31:03,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7af4b83b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:31:03,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b345207{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir/,STOPPED} 2024-11-13T11:31:03,234 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:31:03,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T11:31:03,273 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=226 (was 206) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45435 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:45435 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:45435 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45435 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:45435 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=74 (was 61) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3076 (was 2074) - AvailableMemoryMB LEAK? 
- 2024-11-13T11:31:03,280 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=226, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=74, ProcessCount=11, AvailableMemoryMB=3076 2024-11-13T11:31:03,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T11:31:03,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.log.dir so I do NOT create it in target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b 2024-11-13T11:31:03,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b35b8de0-4b01-1900-49de-655f132af489/hadoop.tmp.dir so I do NOT create it in target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b 2024-11-13T11:31:03,280 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed, deleteOnExit=true 2024-11-13T11:31:03,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/test.cache.data in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T11:31:03,281 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T11:31:03,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/nfs.dump.dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/java.io.tmpdir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T11:31:03,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T11:31:03,296 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:31:03,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:31:03,339 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:31:03,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:31:03,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:31:03,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:31:03,341 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:31:03,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56dc53d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:31:03,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2571c301{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:31:03,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f69d6d9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/java.io.tmpdir/jetty-localhost-39499-hadoop-hdfs-3_4_1-tests_jar-_-any-7215597274540538525/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:31:03,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d0e457e{HTTP/1.1, (http/1.1)}{localhost:39499} 2024-11-13T11:31:03,436 INFO [Time-limited test {}] server.Server(415): Started @305597ms 2024-11-13T11:31:03,446 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T11:31:03,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:31:03,484 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:31:03,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:31:03,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:31:03,485 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:31:03,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c81f758{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:31:03,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@443fd1e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:31:03,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13a6cd2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/java.io.tmpdir/jetty-localhost-40811-hadoop-hdfs-3_4_1-tests_jar-_-any-12040530545019474538/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:31:03,578 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63aa0f5c{HTTP/1.1, (http/1.1)}{localhost:40811} 2024-11-13T11:31:03,578 INFO [Time-limited test {}] server.Server(415): Started @305739ms 2024-11-13T11:31:03,579 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T11:31:03,602 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T11:31:03,605 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T11:31:03,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T11:31:03,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T11:31:03,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T11:31:03,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7df47ec9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir/,AVAILABLE} 2024-11-13T11:31:03,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cc41e8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T11:31:03,634 WARN [Thread-2474 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data1/current/BP-745397936-172.17.0.2-1731497463299/current, will proceed with Du for space computation calculation, 2024-11-13T11:31:03,638 WARN [Thread-2475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data2/current/BP-745397936-172.17.0.2-1731497463299/current, will proceed with Du for space computation calculation, 2024-11-13T11:31:03,659 WARN [Thread-2453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T11:31:03,661 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfbe8c34dcdc425c with lease ID 0x9b5b0ced79e1e211: Processing first storage report for DS-77f26ff5-1a62-4362-9b87-3d22700afca9 from datanode DatanodeRegistration(127.0.0.1:39521, datanodeUuid=17b846f7-7606-478a-9631-6401383d6327, infoPort=39285, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299) 2024-11-13T11:31:03,661 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfbe8c34dcdc425c with lease ID 0x9b5b0ced79e1e211: from storage DS-77f26ff5-1a62-4362-9b87-3d22700afca9 node DatanodeRegistration(127.0.0.1:39521, datanodeUuid=17b846f7-7606-478a-9631-6401383d6327, infoPort=39285, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:31:03,661 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfbe8c34dcdc425c with lease ID 0x9b5b0ced79e1e211: Processing first storage report for DS-caa27680-408d-4de6-a5ee-22d5e6e745f8 from datanode DatanodeRegistration(127.0.0.1:39521, datanodeUuid=17b846f7-7606-478a-9631-6401383d6327, infoPort=39285, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299) 2024-11-13T11:31:03,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfbe8c34dcdc425c with lease ID 0x9b5b0ced79e1e211: from storage DS-caa27680-408d-4de6-a5ee-22d5e6e745f8 node DatanodeRegistration(127.0.0.1:39521, datanodeUuid=17b846f7-7606-478a-9631-6401383d6327, infoPort=39285, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:31:03,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47188d26{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/java.io.tmpdir/jetty-localhost-33483-hadoop-hdfs-3_4_1-tests_jar-_-any-1360574279718210484/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:31:03,707 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29692e24{HTTP/1.1, (http/1.1)}{localhost:33483} 2024-11-13T11:31:03,707 INFO [Time-limited test {}] server.Server(415): Started @305868ms 2024-11-13T11:31:03,708 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T11:31:03,779 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data4/current/BP-745397936-172.17.0.2-1731497463299/current, will proceed with Du for space computation calculation, 2024-11-13T11:31:03,779 WARN [Thread-2500 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data3/current/BP-745397936-172.17.0.2-1731497463299/current, will proceed with Du for space computation calculation, 2024-11-13T11:31:03,805 WARN [Thread-2489 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T11:31:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc898b97d4ea03664 with lease ID 0x9b5b0ced79e1e212: Processing first storage report for DS-fe89ceee-57ca-4410-943b-f9ffe4a03471 from datanode DatanodeRegistration(127.0.0.1:36651, datanodeUuid=0b34b669-963f-446a-85df-db2115934b2b, infoPort=44857, infoSecurePort=0, ipcPort=36835, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299) 2024-11-13T11:31:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc898b97d4ea03664 with lease ID 0x9b5b0ced79e1e212: from storage DS-fe89ceee-57ca-4410-943b-f9ffe4a03471 node DatanodeRegistration(127.0.0.1:36651, datanodeUuid=0b34b669-963f-446a-85df-db2115934b2b, infoPort=44857, infoSecurePort=0, ipcPort=36835, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:31:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc898b97d4ea03664 with lease ID 0x9b5b0ced79e1e212: Processing first storage report for DS-97a6bda7-b7fa-4254-8def-c6bcfddb15b0 from datanode DatanodeRegistration(127.0.0.1:36651, datanodeUuid=0b34b669-963f-446a-85df-db2115934b2b, infoPort=44857, infoSecurePort=0, ipcPort=36835, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299) 2024-11-13T11:31:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc898b97d4ea03664 with lease ID 0x9b5b0ced79e1e212: from storage DS-97a6bda7-b7fa-4254-8def-c6bcfddb15b0 node DatanodeRegistration(127.0.0.1:36651, datanodeUuid=0b34b669-963f-446a-85df-db2115934b2b, infoPort=44857, infoSecurePort=0, ipcPort=36835, storageInfo=lv=-57;cid=testClusterID;nsid=2092140167;c=1731497463299), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T11:31:03,831 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b 2024-11-13T11:31:03,833 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/zookeeper_0, clientPort=51873, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T11:31:03,835 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51873 2024-11-13T11:31:03,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,837 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:31:03,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741825_1001 (size=7) 2024-11-13T11:31:03,845 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551 with version=8 2024-11-13T11:31:03,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42441/user/jenkins/test-data/a88f53c6-7c2b-951c-855a-7852c318109e/hbase-staging 2024-11-13T11:31:03,846 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T11:31:03,847 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:31:03,848 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34609 2024-11-13T11:31:03,848 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34609 connecting to ZooKeeper ensemble=127.0.0.1:51873 2024-11-13T11:31:03,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:346090x0, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-13T11:31:03,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T11:31:03,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T11:31:03,856 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34609-0x10038d9ec870000 connected 2024-11-13T11:31:03,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:31:03,878 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551, hbase.cluster.distributed=false 2024-11-13T11:31:03,879 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:31:03,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34609 2024-11-13T11:31:03,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34609 2024-11-13T11:31:03,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34609 2024-11-13T11:31:03,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1,
port=34609 2024-11-13T11:31:03,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34609 2024-11-13T11:31:03,896 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7bf281cf3991:0 server-side Connection retries=45 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T11:31:03,896 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T11:31:03,897 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42119 2024-11-13T11:31:03,898 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42119 connecting to ZooKeeper ensemble=127.0.0.1:51873 2024-11-13T11:31:03,899 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,905 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421190x0, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T11:31:03,905 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42119-0x10038d9ec870001 connected 2024-11-13T11:31:03,905 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:31:03,906 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T11:31:03,906 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T11:31:03,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:31:03,908 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T11:31:03,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42119 2024-11-13T11:31:03,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42119 2024-11-13T11:31:03,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42119 2024-11-13T11:31:03,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42119 2024-11-13T11:31:03,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42119 2024-11-13T11:31:03,931 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7bf281cf3991:34609 2024-11-13T11:31:03,932 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7bf281cf3991,34609,1731497463846 2024-11-13T11:31:03,933 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:31:03,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:31:03,933 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7bf281cf3991,34609,1731497463846 2024-11-13T11:31:03,934 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T11:31:03,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:03,934 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:03,934 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T11:31:03,935 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7bf281cf3991,34609,1731497463846 from backup master directory 2024-11-13T11:31:03,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7bf281cf3991,34609,1731497463846 2024-11-13T11:31:03,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:31:03,936 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T11:31:03,936 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:31:03,936 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7bf281cf3991,34609,1731497463846 2024-11-13T11:31:03,939 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/hbase.id] with ID: 0e182208-2fed-42b5-8d97-6d07b91468e3 2024-11-13T11:31:03,939 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/.tmp/hbase.id 2024-11-13T11:31:03,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:31:03,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741826_1002 (size=42) 2024-11-13T11:31:03,959 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/.tmp/hbase.id]:[hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/hbase.id] 2024-11-13T11:31:03,965 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:31:03,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T11:31:03,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T11:31:03,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-13T11:31:03,968 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:03,969 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 
2024-11-13T11:31:03,970 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-13T11:31:03,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:03,971 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:03,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:31:03,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741827_1003 (size=196) 2024-11-13T11:31:03,981 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T11:31:03,982 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T11:31:03,982 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:31:03,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:31:03,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741828_1004 (size=1189) 2024-11-13T11:31:03,996 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store 2024-11-13T11:31:04,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:31:04,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741829_1005 (size=34) 2024-11-13T11:31:04,002 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:31:04,002 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:31:04,002 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:04,002 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:04,002 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:31:04,002 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:04,002 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T11:31:04,002 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497464002Disabling compacts and flushes for region at 1731497464002Disabling writes for close at 1731497464002Writing region close event to WAL at 1731497464002Closed at 1731497464002 2024-11-13T11:31:04,003 WARN [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/.initializing 2024-11-13T11:31:04,003 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/WALs/7bf281cf3991,34609,1731497463846 2024-11-13T11:31:04,005 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C34609%2C1731497463846, suffix=, logDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/WALs/7bf281cf3991,34609,1731497463846, archiveDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/oldWALs, maxLogs=10 2024-11-13T11:31:04,006 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C34609%2C1731497463846.1731497464005 2024-11-13T11:31:04,014 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/WALs/7bf281cf3991,34609,1731497463846/7bf281cf3991%2C34609%2C1731497463846.1731497464005 2024-11-13T11:31:04,017 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39285:39285),(127.0.0.1/127.0.0.1:44857:44857)] 2024-11-13T11:31:04,018 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:31:04,018 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:31:04,018 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,018 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T11:31:04,020 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T11:31:04,022 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:31:04,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T11:31:04,024 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:31:04,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,025 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T11:31:04,025 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T11:31:04,026 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,027 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,027 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,028 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,028 DEBUG [master/7bf281cf3991:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,028 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T11:31:04,029 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T11:31:04,034 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:31:04,035 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768673, jitterRate=-0.022581875324249268}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T11:31:04,035 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731497464018Initializing all the Stores at 1731497464019 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464019Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497464019Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497464019Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497464019Cleaning up temporary data from old regions at 1731497464028 (+9 ms)Region opened successfully at 1731497464035 (+7 ms) 2024-11-13T11:31:04,037 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T11:31:04,040 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b186cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:31:04,041 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T11:31:04,041 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T11:31:04,041 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T11:31:04,041 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T11:31:04,042 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T11:31:04,042 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T11:31:04,042 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T11:31:04,047 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T11:31:04,048 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T11:31:04,049 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T11:31:04,049 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T11:31:04,050 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T11:31:04,050 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T11:31:04,051 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T11:31:04,054 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T11:31:04,054 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T11:31:04,055 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T11:31:04,056 DEBUG 
[master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T11:31:04,058 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T11:31:04,059 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T11:31:04,060 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:31:04,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T11:31:04,060 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,061 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7bf281cf3991,34609,1731497463846, sessionid=0x10038d9ec870000, setting cluster-up flag (Was=false) 2024-11-13T11:31:04,062 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,066 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T11:31:04,067 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,34609,1731497463846 2024-11-13T11:31:04,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,069 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,072 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T11:31:04,073 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7bf281cf3991,34609,1731497463846 2024-11-13T11:31:04,075 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T11:31:04,079 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T11:31:04,079 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T11:31:04,079 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T11:31:04,079 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7bf281cf3991,34609,1731497463846 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7bf281cf3991:0, corePoolSize=5, maxPoolSize=5 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7bf281cf3991:0, corePoolSize=10, maxPoolSize=10 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:31:04,081 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7bf281cf3991:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T11:31:04,089 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:31:04,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T11:31:04,091 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,091 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731497494093 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T11:31:04,093 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T11:31:04,097 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,098 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T11:31:04,098 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T11:31:04,098 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T11:31:04,098 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T11:31:04,098 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T11:31:04,105 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497464098,5,FailOnTimeoutGroup] 2024-11-13T11:31:04,105 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497464105,5,FailOnTimeoutGroup] 2024-11-13T11:31:04,105 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,105 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T11:31:04,105 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,105 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T11:31:04,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:31:04,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741831_1007 (size=1321) 2024-11-13T11:31:04,114 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T11:31:04,114 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551 2024-11-13T11:31:04,126 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(746): ClusterId : 0e182208-2fed-42b5-8d97-6d07b91468e3 2024-11-13T11:31:04,126 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T11:31:04,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:31:04,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741832_1008 (size=32) 2024-11-13T11:31:04,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:31:04,129 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T11:31:04,129 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-11-13T11:31:04,131 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T11:31:04,131 DEBUG [RS:0;7bf281cf3991:42119 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b4951c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7bf281cf3991/172.17.0.2:0 2024-11-13T11:31:04,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:31:04,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:31:04,135 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:31:04,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:31:04,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:31:04,138 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:31:04,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:31:04,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:31:04,141 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:31:04,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740 2024-11-13T11:31:04,143 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740 2024-11-13T11:31:04,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:31:04,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:31:04,145 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:31:04,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:31:04,150 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7bf281cf3991:42119 2024-11-13T11:31:04,150 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T11:31:04,150 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T11:31:04,150 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T11:31:04,151 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(2659): reportForDuty to master=7bf281cf3991,34609,1731497463846 with port=42119, startcode=1731497463895 2024-11-13T11:31:04,151 DEBUG [RS:0;7bf281cf3991:42119 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T11:31:04,154 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T11:31:04,154 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857112, jitterRate=0.08987495303153992}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:31:04,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731497464128Initializing all the Stores at 1731497464129 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464129Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464133 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497464133Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464133Cleaning up temporary data from old regions at 1731497464144 (+11 ms)Region opened successfully at 1731497464155 (+11 ms) 2024-11-13T11:31:04,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:31:04,155 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:31:04,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:31:04,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:31:04,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:31:04,161 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:31:04,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497464155Disabling compacts and flushes for region at 1731497464155Disabling writes for close at 1731497464155Writing region close event to WAL at 1731497464161 (+6 ms)Closed at 1731497464161 2024-11-13T11:31:04,162 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48657, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T11:31:04,162 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:31:04,163 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T11:31:04,163 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34609 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,163 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34609 {}] master.ServerManager(517): Registering regionserver=7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T11:31:04,164 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:31:04,164 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551 2024-11-13T11:31:04,164 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42785 2024-11-13T11:31:04,165 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.master.info.port=-1 2024-11-13T11:31:04,166 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T11:31:04,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:31:04,167 DEBUG [RS:0;7bf281cf3991:42119 {}] zookeeper.ZKUtil(111): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,167 WARN [RS:0;7bf281cf3991:42119 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T11:31:04,167 INFO [RS:0;7bf281cf3991:42119 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:31:04,167 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,172 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7bf281cf3991,42119,1731497463895] 2024-11-13T11:31:04,176 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T11:31:04,177 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T11:31:04,183 INFO [RS:0;7bf281cf3991:42119 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T11:31:04,183 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,183 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T11:31:04,184 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T11:31:04,184 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
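
Two of the regionserver settings logged just above are plain configuration values: the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower, retuned every 60000 ms) and the MemStoreFlusher global limit (880 M with a low-water mark of 836 M, i.e. 95% of the limit). A hedged sketch of the keys that feed them; the key names are quoted from memory of the stock HBase properties and should be verified against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Compaction throughput bounds from the log, expressed in bytes per second.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            // globalMemStoreLimit=880 M corresponds to the default 0.4 fraction of the test heap;
            // the 836 M low-water mark is 95% of that limit.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        }
    }
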
2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7bf281cf3991:0, corePoolSize=2, maxPoolSize=2 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7bf281cf3991:0, corePoolSize=1, maxPoolSize=1 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:31:04,184 DEBUG [RS:0;7bf281cf3991:42119 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7bf281cf3991:0, corePoolSize=3, maxPoolSize=3 2024-11-13T11:31:04,185 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,185 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,185 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,185 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-13T11:31:04,185 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,185 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,42119,1731497463895-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:31:04,205 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T11:31:04,205 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,42119,1731497463895-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,206 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,206 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.Replication(171): 7bf281cf3991,42119,1731497463895 started 2024-11-13T11:31:04,221 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,222 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1482): Serving as 7bf281cf3991,42119,1731497463895, RpcServer on 7bf281cf3991/172.17.0.2:42119, sessionid=0x10038d9ec870001 2024-11-13T11:31:04,222 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T11:31:04,222 DEBUG [RS:0;7bf281cf3991:42119 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,222 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,42119,1731497463895' 2024-11-13T11:31:04,222 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T11:31:04,222 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7bf281cf3991,42119,1731497463895' 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T11:31:04,223 DEBUG [RS:0;7bf281cf3991:42119 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T11:31:04,223 INFO [RS:0;7bf281cf3991:42119 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T11:31:04,223 INFO [RS:0;7bf281cf3991:42119 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-13T11:31:04,316 WARN [7bf281cf3991:34609 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T11:31:04,325 INFO [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C42119%2C1731497463895, suffix=, logDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/7bf281cf3991,42119,1731497463895, archiveDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs, maxLogs=32 2024-11-13T11:31:04,326 INFO [RS:0;7bf281cf3991:42119 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C42119%2C1731497463895.1731497464325 2024-11-13T11:31:04,338 INFO [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/7bf281cf3991,42119,1731497463895/7bf281cf3991%2C42119%2C1731497463895.1731497464325 2024-11-13T11:31:04,347 DEBUG [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39285:39285),(127.0.0.1/127.0.0.1:44857:44857)] 2024-11-13T11:31:04,493 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=9, reused chunk count=73, reuseRatio=89.02% 2024-11-13T11:31:04,493 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-13T11:31:04,566 DEBUG [7bf281cf3991:34609 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T11:31:04,567 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,568 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,42119,1731497463895, state=OPENING 2024-11-13T11:31:04,569 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T11:31:04,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,570 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,571 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:31:04,571 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:31:04,571 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T11:31:04,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,42119,1731497463895}] 2024-11-13T11:31:04,724 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T11:31:04,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42071, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T11:31:04,735 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T11:31:04,735 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:31:04,737 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7bf281cf3991%2C42119%2C1731497463895.meta, suffix=.meta, logDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/7bf281cf3991,42119,1731497463895, archiveDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs, maxLogs=32 2024-11-13T11:31:04,737 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7bf281cf3991%2C42119%2C1731497463895.meta.1731497464737.meta 2024-11-13T11:31:04,750 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/7bf281cf3991,42119,1731497463895/7bf281cf3991%2C42119%2C1731497463895.meta.1731497464737.meta 2024-11-13T11:31:04,757 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39285:39285),(127.0.0.1/127.0.0.1:44857:44857)] 2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T11:31:04,759 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
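
The WAL lines above show the provider being instantiated (FSHLogProvider) and the geometry it was given: blocksize=256 MB, rollsize=128 MB and maxLogs=32, where the roll size is the block size times a 0.5 multiplier. A sketch of the configuration keys that commonly control this; the key names are assumptions based on the stock HBase properties rather than anything printed in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.wal.provider", "filesystem");                  // FSHLogProvider, as instantiated above
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // 256 MB * 0.5 = the 128 MB rollsize
            conf.setInt("hbase.regionserver.maxlogs", 32);
        }
    }
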
2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T11:31:04,759 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T11:31:04,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T11:31:04,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T11:31:04,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T11:31:04,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T11:31:04,763 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T11:31:04,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T11:31:04,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T11:31:04,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T11:31:04,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T11:31:04,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T11:31:04,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
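
The CompactionConfiguration dumps repeated above for every column family all carry the same numbers: minimum compaction size 128 MB, 3-10 files per compaction, ratio 1.2 (5.0 off-peak), and a major compaction period of 604800000 ms (7 days) with 0.5 jitter. These values map onto the standard compaction keys; a short sketch with the numbers copied from the log rather than tuned for any workload:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
            conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period, 7 days
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        }
    }
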
2024-11-13T11:31:04,770 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T11:31:04,771 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740 2024-11-13T11:31:04,771 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740 2024-11-13T11:31:04,773 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T11:31:04,773 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T11:31:04,773 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T11:31:04,774 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T11:31:04,775 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725786, jitterRate=-0.07711611688137054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T11:31:04,775 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T11:31:04,776 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731497464760Writing region info on filesystem at 1731497464760Initializing all the Stores at 1731497464760Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464760Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464760Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731497464760Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731497464760Cleaning up temporary data from old regions at 1731497464773 (+13 ms)Running coprocessor post-open hooks at 1731497464775 (+2 ms)Region opened successfully at 1731497464776 (+1 ms) 2024-11-13T11:31:04,777 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731497464723 2024-11-13T11:31:04,780 DEBUG [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T11:31:04,780 INFO [RS_OPEN_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T11:31:04,781 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,782 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7bf281cf3991,42119,1731497463895, state=OPEN 2024-11-13T11:31:04,784 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:31:04,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T11:31:04,784 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,784 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:31:04,784 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T11:31:04,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T11:31:04,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7bf281cf3991,42119,1731497463895 in 213 msec 2024-11-13T11:31:04,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T11:31:04,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 624 msec 2024-11-13T11:31:04,790 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T11:31:04,791 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T11:31:04,792 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:31:04,792 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,42119,1731497463895, seqNum=-1] 2024-11-13T11:31:04,792 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:31:04,794 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42129, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:31:04,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 721 msec 2024-11-13T11:31:04,800 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731497464800, completionTime=-1 2024-11-13T11:31:04,800 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T11:31:04,800 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731497524803 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731497584803 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34609,1731497463846-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34609,1731497463846-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,803 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34609,1731497463846-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,804 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7bf281cf3991:34609, period=300000, unit=MILLISECONDS is enabled. 
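
Earlier in the meta-region open, FlushLargeStoresPolicy noted that hbase.hregion.percolumnfamilyflush.size.lower.bound was not set in the hbase:meta descriptor and fell back to the region's memstore flush heap size divided by its number of families (16.0 M). The property is a table-descriptor value; a sketch of setting it on a hypothetical table, where both the table name and the 32 MB threshold are illustrative only:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo_table"))
                // Column families whose memstores sit below this bound are skipped by a
                // per-column-family flush; only the large ones are flushed.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(32L * 1024 * 1024))
                .build();
            System.out.println(td);
        }
    }
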
2024-11-13T11:31:04,804 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,805 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T11:31:04,807 DEBUG [master/7bf281cf3991:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.874sec 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34609,1731497463846-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T11:31:04,810 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34609,1731497463846-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T11:31:04,815 DEBUG [master/7bf281cf3991:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T11:31:04,815 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T11:31:04,815 INFO [master/7bf281cf3991:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7bf281cf3991,34609,1731497463846-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
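
Several master-side features announce themselves as disabled in this block: quota support (MasterQuotaManager), slow/large request logging to the hbase:slowlog system table, and the WAL event tracker and replication sink tracker tables. Each is gated on a boolean property; a hedged sketch of two of them, where the slowlog key name in particular is recalled from memory and worth double-checking:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterFeatureTogglesSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Quota support is off by default, hence "Quota support disabled" above.
            conf.setBoolean("hbase.quota.enabled", true);
            // Persisting slow/large request records to hbase:slowlog
            // (assumed key name; the log only states that the feature is disabled).
            conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
        }
    }
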
2024-11-13T11:31:04,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a275c3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:31:04,827 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7bf281cf3991,34609,-1 for getting cluster id 2024-11-13T11:31:04,827 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T11:31:04,830 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0e182208-2fed-42b5-8d97-6d07b91468e3' 2024-11-13T11:31:04,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T11:31:04,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0e182208-2fed-42b5-8d97-6d07b91468e3" 2024-11-13T11:31:04,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d424c98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:31:04,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7bf281cf3991,34609,-1] 2024-11-13T11:31:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T11:31:04,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:04,833 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T11:31:04,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3b45f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T11:31:04,835 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T11:31:04,836 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7bf281cf3991,42119,1731497463895, seqNum=-1] 2024-11-13T11:31:04,836 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T11:31:04,838 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T11:31:04,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7bf281cf3991,34609,1731497463846 2024-11-13T11:31:04,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T11:31:04,844 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T11:31:04,844 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T11:31:04,846 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs, maxLogs=32 2024-11-13T11:31:04,847 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731497464847 2024-11-13T11:31:04,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:31:04,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T11:31:04,857 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/test.com,8080,1/test.com%2C8080%2C1.1731497464847 2024-11-13T11:31:04,860 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44857:44857),(127.0.0.1/127.0.0.1:39285:39285)] 2024-11-13T11:31:04,864 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731497464864 2024-11-13T11:31:04,874 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,874 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,874 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,875 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/test.com,8080,1/test.com%2C8080%2C1.1731497464847 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/test.com,8080,1/test.com%2C8080%2C1.1731497464864 2024-11-13T11:31:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741835_1011 (size=93) 2024-11-13T11:31:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741835_1011 (size=93) 2024-11-13T11:31:04,885 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/WALs/test.com,8080,1/test.com%2C8080%2C1.1731497464847 to hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs/test.com%2C8080%2C1.1731497464847 2024-11-13T11:31:04,886 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44857:44857),(127.0.0.1/127.0.0.1:39285:39285)] 2024-11-13T11:31:04,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,889 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,889 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,889 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,889 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741836_1012 (size=93) 2024-11-13T11:31:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741836_1012 (size=93) 2024-11-13T11:31:04,894 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs 2024-11-13T11:31:04,894 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731497464864) 2024-11-13T11:31:04,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T11:31:04,894 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
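The InvocationTargetException warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem#isFileClosed through reflection while old WAL writers are closed; once the test's DFSClient has been shut down, the reflective call fails with IOException("Filesystem closed"). The sketch below only illustrates that probe pattern; apart from isFileClosed itself, the class and method names are invented for the example, and the real logic lives in org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  // Returns true only if the filesystem exposes isFileClosed(Path) and reports
  // the file as closed. With a closed DFSClient the invoke() call throws
  // InvocationTargetException caused by IOException("Filesystem closed"),
  // which is what the WARN lines above record.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // older Hadoop client without the method
    } catch (IllegalAccessException | InvocationTargetException e) {
      return false; // failed invocation; the caller falls back to lease recovery retries
    }
  }
}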
2024-11-13T11:31:04,894 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:31:04,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:04,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:04,894 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
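The call stack above shows the JUnit tear-down path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the async connection and stops the RPC clients. A minimal sketch of that tear-down shape follows; the class name, the field name, and the no-argument startMiniCluster overload are assumptions for illustration, not the actual test source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleLogRollingTest {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts ZooKeeper, a mini DFS, and a single master plus region server.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" ... "Minicluster is down" sequence.
    testUtil.shutdownMiniCluster();
  }
}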
2024-11-13T11:31:04,894 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T11:31:04,894 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=308685533, stopped=false 2024-11-13T11:31:04,894 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7bf281cf3991,34609,1731497463846 2024-11-13T11:31:04,895 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:31:04,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T11:31:04,895 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:04,896 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:31:04,896 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T11:31:04,896 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:31:04,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:04,896 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7bf281cf3991,42119,1731497463895' ***** 2024-11-13T11:31:04,896 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T11:31:04,896 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T11:31:04,896 INFO [RS:0;7bf281cf3991:42119 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T11:31:04,896 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T11:31:04,896 INFO [RS:0;7bf281cf3991:42119 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(959): stopping server 7bf281cf3991,42119,1731497463895 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7bf281cf3991:42119. 
2024-11-13T11:31:04,897 DEBUG [RS:0;7bf281cf3991:42119 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T11:31:04,897 DEBUG [RS:0;7bf281cf3991:42119 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T11:31:04,897 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T11:31:04,897 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T11:31:04,897 DEBUG [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T11:31:04,897 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T11:31:04,897 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T11:31:04,898 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T11:31:04,898 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T11:31:04,898 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T11:31:04,898 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T11:31:04,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:31:04,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T11:31:04,923 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/.tmp/ns/d459b80eb5ef477d9ebec426a9de28c4 is 43, key is default/ns:d/1731497464794/Put/seqid=0 2024-11-13T11:31:04,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741837_1013 (size=5153) 2024-11-13T11:31:04,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741837_1013 (size=5153) 2024-11-13T11:31:04,929 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/.tmp/ns/d459b80eb5ef477d9ebec426a9de28c4 2024-11-13T11:31:04,936 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/.tmp/ns/d459b80eb5ef477d9ebec426a9de28c4 as 
hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/ns/d459b80eb5ef477d9ebec426a9de28c4 2024-11-13T11:31:04,941 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/ns/d459b80eb5ef477d9ebec426a9de28c4, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T11:31:04,942 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-13T11:31:04,949 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T11:31:04,949 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T11:31:04,949 INFO [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T11:31:04,949 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731497464897Running coprocessor pre-close hooks at 1731497464897Disabling compacts and flushes for region at 1731497464897Disabling writes for close at 1731497464898 (+1 ms)Obtaining lock to block concurrent updates at 1731497464898Preparing flush snapshotting stores in 1588230740 at 1731497464898Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731497464898Flushing stores of hbase:meta,,1.1588230740 at 1731497464902 (+4 ms)Flushing 1588230740/ns: creating writer at 1731497464902Flushing 1588230740/ns: appending metadata at 1731497464923 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731497464923Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77aefc65: reopening flushed file at 1731497464935 (+12 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1731497464942 (+7 ms)Writing region close event to WAL at 1731497464945 (+3 ms)Running coprocessor post-close hooks at 1731497464949 (+4 ms)Closed at 1731497464949 2024-11-13T11:31:04,950 DEBUG [RS_CLOSE_META-regionserver/7bf281cf3991:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T11:31:05,097 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(976): stopping server 7bf281cf3991,42119,1731497463895; all regions closed. 
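The flush of hbase:meta above happens automatically on the region-close path during shutdown. For comparison, a client can request the same kind of flush explicitly through the Admin API; the following is a hedged sketch using standard client calls, not something this test itself performs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server to flush the hbase:meta memstores to new store files,
      // the same work the close path performs before writing the close event.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}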
2024-11-13T11:31:05,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741834_1010 (size=1152) 2024-11-13T11:31:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741834_1010 (size=1152) 2024-11-13T11:31:05,215 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T11:31:05,215 INFO [regionserver/7bf281cf3991:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T11:31:05,504 DEBUG [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs 2024-11-13T11:31:05,504 INFO [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C42119%2C1731497463895.meta:.meta(num 1731497464737) 2024-11-13T11:31:05,504 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,504 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,505 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,505 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,505 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741833_1009 (size=93) 2024-11-13T11:31:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741833_1009 (size=93) 2024-11-13T11:31:05,509 DEBUG [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs 2024-11-13T11:31:05,509 INFO [RS:0;7bf281cf3991:42119 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7bf281cf3991%2C42119%2C1731497463895:(num 1731497464325) 2024-11-13T11:31:05,509 DEBUG [RS:0;7bf281cf3991:42119 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T11:31:05,509 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T11:31:05,509 INFO [RS:0;7bf281cf3991:42119 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:31:05,509 INFO [RS:0;7bf281cf3991:42119 {}] hbase.ChoreService(370): Chore service for: regionserver/7bf281cf3991:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T11:31:05,510 INFO [RS:0;7bf281cf3991:42119 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:31:05,510 INFO [regionserver/7bf281cf3991:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
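The "Moved 1 WAL file(s) to .../oldWALs" lines record rolled WALs being archived. A small, hedged check that lists that archive directory is sketched below; the HDFS URI is copied from the log, and FileSystem.newInstance is used so that closing it does not shut down the process-wide cached filesystem (closing the shared cached instance is exactly what produces the "Filesystem closed" warnings seen elsewhere in this log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Archive directory as reported by AbstractFSWAL above; adjust for your own run.
    Path oldWals = new Path(
        "hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/oldWALs");
    try (FileSystem fs = FileSystem.newInstance(oldWals.toUri(), conf)) {
      for (FileStatus status : fs.listStatus(oldWals)) {
        System.out.println(status.getPath() + "\t" + status.getLen() + " bytes");
      }
    }
  }
}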
2024-11-13T11:31:05,510 INFO [RS:0;7bf281cf3991:42119 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42119 2024-11-13T11:31:05,511 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7bf281cf3991,42119,1731497463895 2024-11-13T11:31:05,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T11:31:05,512 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7bf281cf3991,42119,1731497463895] 2024-11-13T11:31:05,512 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7bf281cf3991,42119,1731497463895 already deleted, retry=false 2024-11-13T11:31:05,513 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7bf281cf3991,42119,1731497463895 expired; onlineServers=0 2024-11-13T11:31:05,513 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7bf281cf3991,34609,1731497463846' ***** 2024-11-13T11:31:05,513 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T11:31:05,513 DEBUG [M:0;7bf281cf3991:34609 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T11:31:05,513 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-13T11:31:05,513 DEBUG [M:0;7bf281cf3991:34609 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] hbase.ChoreService(370): Chore service for: master/7bf281cf3991:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T11:31:05,513 DEBUG [M:0;7bf281cf3991:34609 {}] master.HMaster(1795): Stopping service threads 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T11:31:05,513 INFO [M:0;7bf281cf3991:34609 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T11:31:05,513 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
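The "Chore service for: master/... had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS]" line lists the periodic chores still registered at shutdown. The sketch below shows the general ScheduledChore/ChoreService pattern; the constructor arguments and the millisecond period unit are assumptions based on those log lines, and the chore body is a placeholder rather than the real FlushedSequenceIdFlusher or ReplicationSourceStatistics chore.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  static final class ExampleChore extends ScheduledChore {
    ExampleChore(Stoppable stopper) {
      super("ExampleStatistics", stopper, 300_000); // period assumed to be in milliseconds
    }
    @Override protected void chore() {
      // a real chore would emit replication or flush statistics here
    }
  }

  public static void main(String[] args) throws Exception {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(new ExampleChore(stopper));
    Thread.sleep(1_000);
    service.shutdown(); // mirrors the "Shutdown chores and chore service" lines above
  }
}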
2024-11-13T11:31:05,513 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497464098 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.large.0-1731497464098,5,FailOnTimeoutGroup] 2024-11-13T11:31:05,515 DEBUG [master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497464105 {}] cleaner.HFileCleaner(306): Exit Thread[master/7bf281cf3991:0:becomeActiveMaster-HFileCleaner.small.0-1731497464105,5,FailOnTimeoutGroup] 2024-11-13T11:31:05,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:31:05,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:05,515 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T11:31:05,515 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T11:31:05,516 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T11:31:05,516 INFO [M:0;7bf281cf3991:34609 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/.lastflushedseqids 2024-11-13T11:31:05,517 INFO [RS:0;7bf281cf3991:42119 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:31:05,518 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-13T11:31:05,519 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-13T11:31:05,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741838_1014 (size=99) 2024-11-13T11:31:05,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741838_1014 (size=99) 2024-11-13T11:31:05,538 INFO [M:0;7bf281cf3991:34609 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T11:31:05,538 INFO [M:0;7bf281cf3991:34609 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T11:31:05,539 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T11:31:05,539 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T11:31:05,539 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:05,539 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T11:31:05,539 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T11:31:05,539 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T11:31:05,562 DEBUG [M:0;7bf281cf3991:34609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/811e9b045b774e57a7a2f79e4c696b45 is 82, key is hbase:meta,,1/info:regioninfo/1731497464780/Put/seqid=0 2024-11-13T11:31:05,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741839_1015 (size=5672) 2024-11-13T11:31:05,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741839_1015 (size=5672) 2024-11-13T11:31:05,571 INFO [M:0;7bf281cf3991:34609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/811e9b045b774e57a7a2f79e4c696b45 2024-11-13T11:31:05,599 DEBUG [M:0;7bf281cf3991:34609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1aa6c99bcd6b4009912cd8418c13982c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731497464799/Put/seqid=0 2024-11-13T11:31:05,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741840_1016 (size=5275) 2024-11-13T11:31:05,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741840_1016 (size=5275) 2024-11-13T11:31:05,604 INFO [M:0;7bf281cf3991:34609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1aa6c99bcd6b4009912cd8418c13982c 2024-11-13T11:31:05,619 INFO [RS:0;7bf281cf3991:42119 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:31:05,619 INFO [RS:0;7bf281cf3991:42119 {}] regionserver.HRegionServer(1031): Exiting; stopping=7bf281cf3991,42119,1731497463895; zookeeper connection closed. 
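The repeated ZKWatcher lines ("Set watcher on znode that does not yet exist, /hbase/running", the NodeDeleted events, and finally "zookeeper connection closed") follow the standard ZooKeeper watch pattern: register a watch via exists() even when the node is absent and react when it appears or disappears. A bare ZooKeeper sketch of that pattern, outside of HBase, is shown below; the quorum address is taken from the log and the session timeout is an arbitrary example value.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("ZK event " + event.getType() + " on " + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51873", 30_000, watcher);
    // exists() registers the watch even when the znode is not there yet,
    // matching "Set watcher on znode that does not yet exist, /hbase/running".
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("/hbase/running absent; watch registered for create/delete");
    }
    zk.close();
  }
}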
2024-11-13T11:31:05,619 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:05,619 DEBUG [pool-947-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42119-0x10038d9ec870001, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:05,619 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10ebe827 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10ebe827 2024-11-13T11:31:05,619 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T11:31:05,624 DEBUG [M:0;7bf281cf3991:34609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57768e36447241558baab68af6638176 is 69, key is 7bf281cf3991,42119,1731497463895/rs:state/1731497464163/Put/seqid=0 2024-11-13T11:31:05,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741841_1017 (size=5156) 2024-11-13T11:31:05,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741841_1017 (size=5156) 2024-11-13T11:31:05,634 INFO [M:0;7bf281cf3991:34609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57768e36447241558baab68af6638176 2024-11-13T11:31:05,659 DEBUG [M:0;7bf281cf3991:34609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f810a3f7417a4ec4a68baad4d35a7e5e is 52, key is load_balancer_on/state:d/1731497464842/Put/seqid=0 2024-11-13T11:31:05,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741842_1018 (size=5056) 2024-11-13T11:31:05,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741842_1018 (size=5056) 2024-11-13T11:31:05,664 INFO [M:0;7bf281cf3991:34609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f810a3f7417a4ec4a68baad4d35a7e5e 2024-11-13T11:31:05,670 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/811e9b045b774e57a7a2f79e4c696b45 as hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/811e9b045b774e57a7a2f79e4c696b45 2024-11-13T11:31:05,676 INFO [M:0;7bf281cf3991:34609 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/811e9b045b774e57a7a2f79e4c696b45, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T11:31:05,679 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1aa6c99bcd6b4009912cd8418c13982c as hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1aa6c99bcd6b4009912cd8418c13982c 2024-11-13T11:31:05,685 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1aa6c99bcd6b4009912cd8418c13982c, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T11:31:05,686 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/57768e36447241558baab68af6638176 as hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/57768e36447241558baab68af6638176 2024-11-13T11:31:05,691 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/57768e36447241558baab68af6638176, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T11:31:05,692 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f810a3f7417a4ec4a68baad4d35a7e5e as hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f810a3f7417a4ec4a68baad4d35a7e5e 2024-11-13T11:31:05,699 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42785/user/jenkins/test-data/ba09c779-8d31-bda6-6dfc-bc4df5f2a551/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f810a3f7417a4ec4a68baad4d35a7e5e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T11:31:05,699 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=29, compaction requested=false 2024-11-13T11:31:05,710 INFO [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T11:31:05,710 DEBUG [M:0;7bf281cf3991:34609 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731497465539Disabling compacts and flushes for region at 1731497465539Disabling writes for close at 1731497465539Obtaining lock to block concurrent updates at 1731497465539Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731497465539Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731497465539Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731497465540 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731497465540Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731497465562 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731497465562Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731497465578 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731497465598 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731497465598Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731497465609 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731497465623 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731497465623Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731497465639 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731497465658 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731497465658Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5be4189e: reopening flushed file at 1731497465669 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41219545: reopening flushed file at 1731497465676 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@664b3214: reopening flushed file at 1731497465685 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7929599a: reopening flushed file at 1731497465691 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=29, compaction requested=false at 1731497465700 (+9 ms)Writing region close event to WAL at 1731497465710 (+10 ms)Closed at 1731497465710 2024-11-13T11:31:05,711 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,712 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,712 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,712 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,712 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T11:31:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36651 is added to blk_1073741830_1006 (size=10311) 2024-11-13T11:31:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39521 is added to blk_1073741830_1006 (size=10311) 2024-11-13T11:31:05,715 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T11:31:05,715 INFO [M:0;7bf281cf3991:34609 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T11:31:05,715 INFO [M:0;7bf281cf3991:34609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34609 2024-11-13T11:31:05,715 INFO [M:0;7bf281cf3991:34609 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T11:31:05,817 INFO [M:0;7bf281cf3991:34609 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T11:31:05,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:05,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34609-0x10038d9ec870000, quorum=127.0.0.1:51873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T11:31:05,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47188d26{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:31:05,820 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29692e24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:31:05,820 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:31:05,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cc41e8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:31:05,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7df47ec9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir/,STOPPED} 2024-11-13T11:31:05,821 WARN [BP-745397936-172.17.0.2-1731497463299 heartbeating to localhost/127.0.0.1:42785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:31:05,821 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:31:05,821 WARN [BP-745397936-172.17.0.2-1731497463299 heartbeating to localhost/127.0.0.1:42785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-745397936-172.17.0.2-1731497463299 (Datanode Uuid 0b34b669-963f-446a-85df-db2115934b2b) service to localhost/127.0.0.1:42785 2024-11-13T11:31:05,821 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:31:05,822 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data3/current/BP-745397936-172.17.0.2-1731497463299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:05,822 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data4/current/BP-745397936-172.17.0.2-1731497463299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:05,822 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:31:05,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13a6cd2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T11:31:05,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63aa0f5c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:31:05,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:31:05,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@443fd1e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:31:05,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c81f758{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir/,STOPPED} 2024-11-13T11:31:05,832 WARN [BP-745397936-172.17.0.2-1731497463299 heartbeating to localhost/127.0.0.1:42785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T11:31:05,832 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T11:31:05,832 WARN [BP-745397936-172.17.0.2-1731497463299 heartbeating to localhost/127.0.0.1:42785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-745397936-172.17.0.2-1731497463299 (Datanode Uuid 17b846f7-7606-478a-9631-6401383d6327) service to localhost/127.0.0.1:42785 2024-11-13T11:31:05,832 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T11:31:05,833 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data1/current/BP-745397936-172.17.0.2-1731497463299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:05,833 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/cluster_66b1ab15-a2f5-2176-994d-fca4c662e8ed/data/data2/current/BP-745397936-172.17.0.2-1731497463299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T11:31:05,833 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T11:31:05,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f69d6d9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T11:31:05,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d0e457e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T11:31:05,840 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T11:31:05,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2571c301{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T11:31:05,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56dc53d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8b221e41-1636-067e-9da7-a270c9d5150b/hadoop.log.dir/,STOPPED} 2024-11-13T11:31:05,847 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T11:31:05,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,38159,1731497264714/7bf281cf3991%2C38159%2C1731497264714.1731497264924 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T11:31:05,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35763/user/jenkins/test-data/985d9234-5a17-a094-2931-aec8c386f718/WALs/7bf281cf3991,34477,1731497263757/7bf281cf3991%2C34477%2C1731497263757.meta.1731497264594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T11:31:05,861 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-13T11:31:05,870 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 226)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42785 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42785
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:42785
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42785 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42785
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-6
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-21
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42785 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42785
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42785
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=541 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=100 (was 74) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3158 (was 3076) - AvailableMemoryMB LEAK? -